outline/server/utils/s3.js
Huss 8e2b19dc7a feat: private content (#1137)
* save images as private and serve via signed url from images.info api

* download private images to directory on export

* fix lint errors

* default S3 uploads to private, move the AWS.S3 client to module-level scope, add a default S3 URL expiry

* combine the regexes into one, and only replace when there are matches

* fix lint

* remove code that is no longer needed

* updates after pulling master

* revert the uploadToS3FromUrl url return

* use a model getter to compact code, rename to attachments api

* basic checking of document read permission to allow attachment viewing

* fix: Continue to upload avatars as public
fix: Allow redirect for non-private attachments

* add support for publicly shared documents

* catch errors which crash the app during zip export and user creation

* add tests

* enable AWS signature v4 for s3

* switch to use factories to build models for testing

* add isDocker flag for local serving of attachment redirect url

* fix redirect tests

Co-authored-by: Tom Moor <tom.moor@gmail.com>
2020-02-12 19:40:44 -08:00

// @flow
import crypto from 'crypto';
import addHours from 'date-fns/add_hours';
import format from 'date-fns/format';
import AWS from 'aws-sdk';
import invariant from 'invariant';
import fetch from 'isomorphic-fetch';
import bugsnag from 'bugsnag';
const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY;
const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID;
const AWS_REGION = process.env.AWS_REGION;
const AWS_S3_UPLOAD_BUCKET_NAME = process.env.AWS_S3_UPLOAD_BUCKET_NAME;
const s3 = new AWS.S3({
  s3ForcePathStyle: true,
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
  endpoint: new AWS.Endpoint(process.env.AWS_S3_UPLOAD_BUCKET_URL),
  signatureVersion: 'v4',
});
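
// HMAC-SHA256 helper. When `encoding` is omitted, digest() returns a raw
// Buffer, which can be fed back in as the key for the next step of the
// Signature V4 key derivation in getSignature below.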
const hmac = (key: string, message: string, encoding: any) => {
  return crypto
    .createHmac('sha256', key)
    .update(message, 'utf8')
    .digest(encoding);
};
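
// Build the AWS Signature V4 credential scope string, in the form
// <access-key-id>/<YYYYMMDD>/<region>/s3/aws4_request.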
export const makeCredential = () => {
  const credential =
    AWS_ACCESS_KEY_ID +
    '/' +
    format(new Date(), 'YYYYMMDD') +
    '/' +
    AWS_REGION +
    '/s3/aws4_request';
  return credential;
};
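
// Build the base64-encoded POST policy used for direct browser uploads.
// The policy expires after 24 hours and pins the bucket, key prefix, ACL,
// maximum upload size, image-only content types, and the Signature V4
// algorithm, credential, and date fields.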
export const makePolicy = (
  credential: string,
  longDate: string,
  acl: string
) => {
  const tomorrow = addHours(new Date(), 24);
  const policy = {
    conditions: [
      { bucket: process.env.AWS_S3_UPLOAD_BUCKET_NAME },
      ['starts-with', '$key', ''],
      { acl },
      ['content-length-range', 0, +process.env.AWS_S3_UPLOAD_MAX_SIZE],
      ['starts-with', '$Content-Type', 'image'],
      ['starts-with', '$Cache-Control', ''],
      { 'x-amz-algorithm': 'AWS4-HMAC-SHA256' },
      { 'x-amz-credential': credential },
      { 'x-amz-date': longDate },
    ],
    expiration: format(tomorrow, 'YYYY-MM-DDTHH:mm:ss\\Z'),
  };

  return Buffer.from(JSON.stringify(policy)).toString('base64');
};
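
// Sign a policy using the AWS Signature V4 key derivation chain: the secret
// key is HMAC-chained through the date, region, service ('s3'), and the
// literal 'aws4_request' to produce a signing key, and the signature is the
// hex HMAC of the policy under that key.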
export const getSignature = (policy: any) => {
  const kDate = hmac(
    'AWS4' + AWS_SECRET_ACCESS_KEY,
    format(new Date(), 'YYYYMMDD')
  );
  const kRegion = hmac(kDate, AWS_REGION);
  const kService = hmac(kRegion, 's3');
  const kCredentials = hmac(kService, 'aws4_request');
  const signature = hmac(kCredentials, policy, 'hex');
  return signature;
};
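
// Resolve the externally reachable URL of the upload bucket. For server-side
// uploads inside Docker, an extra `s3/` path segment is inserted before the
// bucket name.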
export const publicS3Endpoint = (isServerUpload?: boolean) => {
  // lose trailing slash if there is one and convert fake-s3 url to localhost
  // for access outside of docker containers in local development
  const isDocker = process.env.AWS_S3_UPLOAD_BUCKET_URL.match(/http:\/\/s3:/);
  const host = process.env.AWS_S3_UPLOAD_BUCKET_URL.replace(
    's3:',
    'localhost:'
  ).replace(/\/$/, '');

  return `${host}/${isServerUpload && isDocker ? 's3/' : ''}${
    process.env.AWS_S3_UPLOAD_BUCKET_NAME
  }`;
};
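
// Fetch a remote URL and store the response body in the upload bucket under
// `key` with the given ACL, returning the object's public endpoint URL. In
// production, failures are reported to Bugsnag and the promise resolves to
// undefined rather than rejecting.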
export const uploadToS3FromUrl = async (
  url: string,
  key: string,
  acl: string
) => {
  invariant(AWS_S3_UPLOAD_BUCKET_NAME, 'AWS_S3_UPLOAD_BUCKET_NAME not set');

  try {
    // $FlowIssue https://github.com/facebook/flow/issues/2171
    const res = await fetch(url);
    const buffer = await res.buffer();
    await s3
      .putObject({
        ACL: acl,
        Bucket: process.env.AWS_S3_UPLOAD_BUCKET_NAME,
        Key: key,
        // fetch exposes response headers as a Headers object, so they must
        // be read with .get() rather than bracket access
        ContentType: res.headers.get('content-type'),
        ContentLength: res.headers.get('content-length'),
        ServerSideEncryption: 'AES256',
        Body: buffer,
      })
      .promise();

    const endpoint = publicS3Endpoint(true);
    return `${endpoint}/${key}`;
  } catch (err) {
    if (process.env.NODE_ENV === 'production') {
      bugsnag.notify(err);
    } else {
      throw err;
    }
  }
};
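
// Return a signed GET URL for a private object, valid for 60 seconds. Under
// the local Docker fake-s3 setup, the plain public endpoint is returned
// instead of a signed URL.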
export const getSignedImageUrl = async (key: string) => {
  invariant(AWS_S3_UPLOAD_BUCKET_NAME, 'AWS_S3_UPLOAD_BUCKET_NAME not set');

  const isDocker = process.env.AWS_S3_UPLOAD_BUCKET_URL.match(/http:\/\/s3:/);
  const params = {
    Bucket: process.env.AWS_S3_UPLOAD_BUCKET_NAME,
    Key: key,
    Expires: 60,
  };

  return isDocker
    ? `${publicS3Endpoint()}/${key}`
    : s3.getSignedUrl('getObject', params);
};
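
// Fetch an object's raw bytes from the bucket, e.g. when downloading private
// images into a directory for a zip export. In production, errors are
// reported to Bugsnag and the function resolves to undefined.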
export const getImageByKey = async (key: string) => {
  const params = {
    Bucket: process.env.AWS_S3_UPLOAD_BUCKET_NAME,
    Key: key,
  };

  try {
    const data = await s3.getObject(params).promise();
    return data.Body;
  } catch (err) {
    if (process.env.NODE_ENV === 'production') {
      bugsnag.notify(err);
    } else {
      throw err;
    }
  }
};
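
// Example usage (a hypothetical sketch, not part of this module): copy a
// remote image into the bucket as a private object, then hand a viewer a
// short-lived signed URL. The key layout and variables below are assumptions
// for illustration only.
//
//   import { uploadToS3FromUrl, getSignedImageUrl } from './s3';
//
//   const key = `uploads/${user.id}/${attachmentId}/image.png`;
//   await uploadToS3FromUrl('https://example.com/image.png', key, 'private');
//
//   // Later, after checking the viewer's document read permission:
//   const signedUrl = await getSignedImageUrl(key);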