feat: Add support for S3 transfer acceleration

Tom Moor
2022-03-02 21:12:38 -08:00
parent 4468d29740
commit 5e96145277
4 changed files with 31 additions and 5 deletions

.env.sample

@@ -36,6 +36,7 @@ COLLABORATION_URL=
 AWS_ACCESS_KEY_ID=get_a_key_from_aws
 AWS_SECRET_ACCESS_KEY=get_the_secret_of_above_key
 AWS_REGION=xx-xxxx-x
+AWS_S3_ACCELERATE_URL=
 AWS_S3_UPLOAD_BUCKET_URL=http://s3:4569
 AWS_S3_UPLOAD_BUCKET_NAME=bucket_name_here
 AWS_S3_UPLOAD_MAX_SIZE=26214400
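When transfer acceleration is enabled on the bucket, the new variable holds the bucket's accelerate endpoint; left empty, the feature stays off. A hypothetical example (the hostname format is AWS's standard accelerate endpoint; the bucket name is illustrative only):

# hypothetical value for illustration only
AWS_S3_ACCELERATE_URL=https://my-outline-uploads.s3-accelerate.amazonaws.com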

server/models/Attachment.ts

@@ -10,7 +10,7 @@ import {
   Table,
   DataType,
 } from "sequelize-typescript";
-import { deleteFromS3, getFileByKey } from "@server/utils/s3";
+import { publicS3Endpoint, deleteFromS3, getFileByKey } from "@server/utils/s3";
 import Document from "./Document";
 import Team from "./Team";
 import User from "./User";
@@ -55,6 +55,14 @@ class Attachment extends BaseModel {
     return getFileByKey(this.key);
   }

+  /**
+   * Use this instead of `url`, which will be deleted soon; that column is
+   * unnecessary and was not updated with the migration to the new S3 bucket.
+   */
+  get canonicalUrl() {
+    return `${publicS3Endpoint()}/${this.key}`;
+  }
+
   // hooks
   @BeforeDestroy
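With the getter in place, an attachment's public URL is derived from the live S3 configuration rather than the stored url column. A rough sketch of the value it yields, reusing the hypothetical accelerate URL from the env example above and an illustrative attachment key:

// hypothetical values for illustration only
// publicS3Endpoint() => "https://my-outline-uploads.s3-accelerate.amazonaws.com"
// attachment.key     => "uploads/1234/image.png"
attachment.canonicalUrl;
// => "https://my-outline-uploads.s3-accelerate.amazonaws.com/uploads/1234/image.png"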

server/routes/api/attachments.ts

@@ -135,7 +135,7 @@ router.post("attachments.redirect", auth(), async (ctx) => {
     const accessUrl = await getSignedUrl(attachment.key);
     ctx.redirect(accessUrl);
   } else {
-    ctx.redirect(attachment.url);
+    ctx.redirect(attachment.canonicalUrl);
   }
 });
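Private attachments continue to redirect through a short-lived signed URL; public attachments now redirect to canonicalUrl, so the destination reflects the current configuration, including the accelerate endpoint when set, instead of a URL stored at upload time.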

server/utils/s3.ts

@@ -6,22 +6,28 @@ import fetch from "fetch-with-proxy";
 import { v4 as uuidv4 } from "uuid";
 import Logger from "@server/logging/logger";

+const AWS_S3_ACCELERATE_URL = process.env.AWS_S3_ACCELERATE_URL;
 const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY;
 const AWS_S3_UPLOAD_BUCKET_URL = process.env.AWS_S3_UPLOAD_BUCKET_URL || "";
 const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID;
 const AWS_REGION = process.env.AWS_REGION || "";
 const AWS_S3_UPLOAD_BUCKET_NAME = process.env.AWS_S3_UPLOAD_BUCKET_NAME || "";
 const AWS_S3_FORCE_PATH_STYLE = process.env.AWS_S3_FORCE_PATH_STYLE !== "false";

 const s3 = new AWS.S3({
+  s3BucketEndpoint: AWS_S3_ACCELERATE_URL ? true : undefined,
   s3ForcePathStyle: AWS_S3_FORCE_PATH_STYLE,
   accessKeyId: AWS_ACCESS_KEY_ID,
   secretAccessKey: AWS_SECRET_ACCESS_KEY,
   region: AWS_REGION,
-  endpoint: AWS_S3_UPLOAD_BUCKET_URL.includes(AWS_S3_UPLOAD_BUCKET_NAME)
+  endpoint: AWS_S3_ACCELERATE_URL
+    ? AWS_S3_ACCELERATE_URL
+    : AWS_S3_UPLOAD_BUCKET_URL.includes(AWS_S3_UPLOAD_BUCKET_NAME)
     ? undefined
     : new AWS.Endpoint(AWS_S3_UPLOAD_BUCKET_URL),
   signatureVersion: "v4",
 });

 const createPresignedPost = util.promisify(s3.createPresignedPost).bind(s3);

 const hmac = (
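The nested ternary packs three endpoint choices into one expression. As a reading aid, a minimal sketch of the same precedence as a standalone function (the name resolveEndpoint and its parameters are illustrative, not part of the codebase):

// Sketch of the endpoint precedence encoded by the ternary above.
import AWS from "aws-sdk";

function resolveEndpoint(
  accelerateUrl: string | undefined,
  bucketUrl: string,
  bucketName: string
): string | AWS.Endpoint | undefined {
  // 1. A configured accelerate URL wins outright; the companion
  //    s3BucketEndpoint: true option tells the SDK that this endpoint
  //    already includes the bucket name.
  if (accelerateUrl) {
    return accelerateUrl;
  }
  // 2. If the bucket URL already embeds the bucket name (virtual-hosted
  //    style), leave the endpoint undefined and let the SDK derive it.
  if (bucketUrl.includes(bucketName)) {
    return undefined;
  }
  // 3. Otherwise target the custom endpoint, e.g. a local fake-s3 container.
  return new AWS.Endpoint(bucketUrl);
}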
@@ -116,6 +122,10 @@ export const getPresignedPost = (
 };

 export const publicS3Endpoint = (isServerUpload?: boolean) => {
+  if (AWS_S3_ACCELERATE_URL) {
+    return AWS_S3_ACCELERATE_URL;
+  }
+
   // lose trailing slash if there is one and convert fake-s3 url to localhost
   // for access outside of docker containers in local development
   const isDocker = AWS_S3_UPLOAD_BUCKET_URL.match(/http:\/\/s3:/);
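Because the accelerate URL returns early, it takes precedence over the Docker-hostname rewriting below, and consumers such as Attachment.canonicalUrl receive the accelerated endpoint directly.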
@@ -205,9 +215,16 @@ export const getSignedUrl = async (key: string) => {
     Key: key,
     Expires: 60,
   };

-  return isDocker
+  const url = isDocker
     ? `${publicS3Endpoint()}/${key}`
-    : s3.getSignedUrl("getObject", params);
+    : await s3.getSignedUrlPromise("getObject", params);
+
+  if (AWS_S3_ACCELERATE_URL) {
+    return url.replace(AWS_S3_UPLOAD_BUCKET_URL, AWS_S3_ACCELERATE_URL);
+  }
+
+  return url;
 };

 // function assumes that acl is private
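The switch from the callback-flavoured s3.getSignedUrl to s3.getSignedUrlPromise lets the handler await the URL before optionally swapping in the accelerate host with a plain string replace. A rough illustration with hypothetical values (bucket name and signature query string abbreviated for readability):

// hypothetical values for illustration only
const bucketUrl = "https://my-outline-uploads.s3.amazonaws.com";
const accelerateUrl = "https://my-outline-uploads.s3-accelerate.amazonaws.com";

const signed =
  "https://my-outline-uploads.s3.amazonaws.com/uploads/1234/image.png?X-Amz-Signature=abc123";

signed.replace(bucketUrl, accelerateUrl);
// => "https://my-outline-uploads.s3-accelerate.amazonaws.com/uploads/1234/image.png?X-Amz-Signature=abc123"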