diff --git a/.env.sample b/.env.sample index af3207880..9d92216ba 100644 --- a/.env.sample +++ b/.env.sample @@ -36,6 +36,7 @@ COLLABORATION_URL= AWS_ACCESS_KEY_ID=get_a_key_from_aws AWS_SECRET_ACCESS_KEY=get_the_secret_of_above_key AWS_REGION=xx-xxxx-x +AWS_S3_ACCELERATE_URL= AWS_S3_UPLOAD_BUCKET_URL=http://s3:4569 AWS_S3_UPLOAD_BUCKET_NAME=bucket_name_here AWS_S3_UPLOAD_MAX_SIZE=26214400 diff --git a/server/models/Attachment.ts b/server/models/Attachment.ts index 62e3946ac..54b23a9c7 100644 --- a/server/models/Attachment.ts +++ b/server/models/Attachment.ts @@ -10,7 +10,7 @@ import { Table, DataType, } from "sequelize-typescript"; -import { deleteFromS3, getFileByKey } from "@server/utils/s3"; +import { publicS3Endpoint, deleteFromS3, getFileByKey } from "@server/utils/s3"; import Document from "./Document"; import Team from "./Team"; import User from "./User"; @@ -55,6 +55,14 @@ class Attachment extends BaseModel { return getFileByKey(this.key); } + /** + * Use this instead of url which will be deleted soon, the column is unnecessary + * and was not updated with the migration to the new s3 bucket. 
+ */ + get canonicalUrl() { + return `${publicS3Endpoint()}/${this.key}`; + } + // hooks @BeforeDestroy diff --git a/server/routes/api/attachments.ts b/server/routes/api/attachments.ts index 96e5febd8..4d24629b0 100644 --- a/server/routes/api/attachments.ts +++ b/server/routes/api/attachments.ts @@ -135,7 +135,7 @@ router.post("attachments.redirect", auth(), async (ctx) => { const accessUrl = await getSignedUrl(attachment.key); ctx.redirect(accessUrl); } else { - ctx.redirect(attachment.url); + ctx.redirect(attachment.canonicalUrl); } }); diff --git a/server/utils/s3.ts b/server/utils/s3.ts index 193f111a5..6b3e70719 100644 --- a/server/utils/s3.ts +++ b/server/utils/s3.ts @@ -6,22 +6,28 @@ import fetch from "fetch-with-proxy"; import { v4 as uuidv4 } from "uuid"; import Logger from "@server/logging/logger"; +const AWS_S3_ACCELERATE_URL = process.env.AWS_S3_ACCELERATE_URL; const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY; const AWS_S3_UPLOAD_BUCKET_URL = process.env.AWS_S3_UPLOAD_BUCKET_URL || ""; const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID; const AWS_REGION = process.env.AWS_REGION || ""; const AWS_S3_UPLOAD_BUCKET_NAME = process.env.AWS_S3_UPLOAD_BUCKET_NAME || ""; const AWS_S3_FORCE_PATH_STYLE = process.env.AWS_S3_FORCE_PATH_STYLE !== "false"; + const s3 = new AWS.S3({ + s3BucketEndpoint: AWS_S3_ACCELERATE_URL ? true : undefined, s3ForcePathStyle: AWS_S3_FORCE_PATH_STYLE, accessKeyId: AWS_ACCESS_KEY_ID, secretAccessKey: AWS_SECRET_ACCESS_KEY, region: AWS_REGION, - endpoint: AWS_S3_UPLOAD_BUCKET_URL.includes(AWS_S3_UPLOAD_BUCKET_NAME) + endpoint: AWS_S3_ACCELERATE_URL + ? AWS_S3_ACCELERATE_URL + : AWS_S3_UPLOAD_BUCKET_URL.includes(AWS_S3_UPLOAD_BUCKET_NAME) ? 
undefined : new AWS.Endpoint(AWS_S3_UPLOAD_BUCKET_URL), signatureVersion: "v4", }); + const createPresignedPost = util.promisify(s3.createPresignedPost).bind(s3); const hmac = ( @@ -116,6 +122,10 @@ export const getPresignedPost = ( }; export const publicS3Endpoint = (isServerUpload?: boolean) => { + if (AWS_S3_ACCELERATE_URL) { + return AWS_S3_ACCELERATE_URL; + } + // lose trailing slash if there is one and convert fake-s3 url to localhost // for access outside of docker containers in local development const isDocker = AWS_S3_UPLOAD_BUCKET_URL.match(/http:\/\/s3:/); @@ -205,9 +215,16 @@ export const getSignedUrl = async (key: string) => { Key: key, Expires: 60, }; - return isDocker + + const url = isDocker ? `${publicS3Endpoint()}/${key}` - : s3.getSignedUrl("getObject", params); + : await s3.getSignedUrlPromise("getObject", params); + + if (AWS_S3_ACCELERATE_URL) { + return url.replace(AWS_S3_UPLOAD_BUCKET_URL, AWS_S3_ACCELERATE_URL); + } + + return url; }; // function assumes that acl is private