feat: Add support for S3 transfer acceleration
@@ -36,6 +36,7 @@ COLLABORATION_URL=
 AWS_ACCESS_KEY_ID=get_a_key_from_aws
 AWS_SECRET_ACCESS_KEY=get_the_secret_of_above_key
 AWS_REGION=xx-xxxx-x
+AWS_S3_ACCELERATE_URL=
 AWS_S3_UPLOAD_BUCKET_URL=http://s3:4569
 AWS_S3_UPLOAD_BUCKET_NAME=bucket_name_here
 AWS_S3_UPLOAD_MAX_SIZE=26214400
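The new AWS_S3_ACCELERATE_URL variable is optional: leaving it empty keeps the existing direct-to-bucket behavior. When transfer acceleration is enabled on the bucket, the accelerate endpoint normally takes the form https://<bucket-name>.s3-accelerate.amazonaws.com, where <bucket-name> stands in for the real bucket.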
@@ -10,7 +10,7 @@ import {
   Table,
   DataType,
 } from "sequelize-typescript";
-import { deleteFromS3, getFileByKey } from "@server/utils/s3";
+import { publicS3Endpoint, deleteFromS3, getFileByKey } from "@server/utils/s3";
 import Document from "./Document";
 import Team from "./Team";
 import User from "./User";
@@ -55,6 +55,14 @@ class Attachment extends BaseModel {
     return getFileByKey(this.key);
   }
 
+  /**
+   * Use this instead of url, which will be deleted soon; the column is unnecessary
+   * and was not updated with the migration to the new S3 bucket.
+   */
+  get canonicalUrl() {
+    return `${publicS3Endpoint()}/${this.key}`;
+  }
+
   // hooks
 
   @BeforeDestroy
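The new getter derives the public URL from the attachment key at read time rather than trusting the stored url column. A minimal usage sketch, with a hypothetical attachment id (illustration only, not part of the commit):

// Illustration only: "attachment-id" is a hypothetical primary key.
const attachment = await Attachment.findByPk("attachment-id");
if (attachment) {
  // Built as `${publicS3Endpoint()}/${attachment.key}` rather than read
  // from the soon-to-be-removed url column.
  const href = attachment.canonicalUrl;
}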
@@ -135,7 +135,7 @@ router.post("attachments.redirect", auth(), async (ctx) => {
     const accessUrl = await getSignedUrl(attachment.key);
     ctx.redirect(accessUrl);
   } else {
-    ctx.redirect(attachment.url);
+    ctx.redirect(attachment.canonicalUrl);
   }
 });
 
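With this change the public branch of attachments.redirect builds its target from the key via canonicalUrl, so attachments whose stored url column predates the bucket migration redirect to the current endpoint as well; the signed-URL branch for private attachments is untouched.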
@@ -6,22 +6,28 @@ import fetch from "fetch-with-proxy";
 import { v4 as uuidv4 } from "uuid";
 import Logger from "@server/logging/logger";
 
+const AWS_S3_ACCELERATE_URL = process.env.AWS_S3_ACCELERATE_URL;
 const AWS_SECRET_ACCESS_KEY = process.env.AWS_SECRET_ACCESS_KEY;
 const AWS_S3_UPLOAD_BUCKET_URL = process.env.AWS_S3_UPLOAD_BUCKET_URL || "";
 const AWS_ACCESS_KEY_ID = process.env.AWS_ACCESS_KEY_ID;
 const AWS_REGION = process.env.AWS_REGION || "";
 const AWS_S3_UPLOAD_BUCKET_NAME = process.env.AWS_S3_UPLOAD_BUCKET_NAME || "";
 const AWS_S3_FORCE_PATH_STYLE = process.env.AWS_S3_FORCE_PATH_STYLE !== "false";
 
 const s3 = new AWS.S3({
+  s3BucketEndpoint: AWS_S3_ACCELERATE_URL ? true : undefined,
   s3ForcePathStyle: AWS_S3_FORCE_PATH_STYLE,
   accessKeyId: AWS_ACCESS_KEY_ID,
   secretAccessKey: AWS_SECRET_ACCESS_KEY,
   region: AWS_REGION,
-  endpoint: AWS_S3_UPLOAD_BUCKET_URL.includes(AWS_S3_UPLOAD_BUCKET_NAME)
+  endpoint: AWS_S3_ACCELERATE_URL
+    ? AWS_S3_ACCELERATE_URL
+    : AWS_S3_UPLOAD_BUCKET_URL.includes(AWS_S3_UPLOAD_BUCKET_NAME)
     ? undefined
     : new AWS.Endpoint(AWS_S3_UPLOAD_BUCKET_URL),
   signatureVersion: "v4",
 });
+
 const createPresignedPost = util.promisify(s3.createPresignedPost).bind(s3);
+
 const hmac = (
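Two details in the client configuration work together here: s3BucketEndpoint tells the v2 aws-sdk that the supplied endpoint already addresses a specific bucket, and the endpoint option now prefers the accelerate URL when one is set. A sketch of that endpoint resolution pulled out into a standalone function, assuming the aws-sdk v2 package (the function name and shape are illustrative, not from the commit):

import AWS from "aws-sdk";

// Prefer the accelerate endpoint when configured; otherwise, if the bucket
// URL already embeds the bucket name (virtual-hosted style), return
// undefined so the SDK derives the endpoint itself; otherwise address the
// configured URL directly (path style, e.g. a local fake-s3).
function resolveEndpoint(
  accelerateUrl: string | undefined,
  bucketUrl: string,
  bucketName: string
): string | AWS.Endpoint | undefined {
  if (accelerateUrl) {
    return accelerateUrl;
  }
  return bucketUrl.includes(bucketName)
    ? undefined
    : new AWS.Endpoint(bucketUrl);
}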
@@ -116,6 +122,10 @@ export const getPresignedPost = (
 };
 
 export const publicS3Endpoint = (isServerUpload?: boolean) => {
+  if (AWS_S3_ACCELERATE_URL) {
+    return AWS_S3_ACCELERATE_URL;
+  }
+
   // lose trailing slash if there is one and convert fake-s3 url to localhost
   // for access outside of docker containers in local development
   const isDocker = AWS_S3_UPLOAD_BUCKET_URL.match(/http:\/\/s3:/);
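A configured accelerate URL short-circuits publicS3Endpoint before the fake-s3/Docker rewriting below; that rewriting only matters for local development, where acceleration would not be configured.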
@@ -205,9 +215,16 @@ export const getSignedUrl = async (key: string) => {
     Key: key,
     Expires: 60,
   };
-  return isDocker
+
+  const url = isDocker
     ? `${publicS3Endpoint()}/${key}`
-    : s3.getSignedUrl("getObject", params);
+    : await s3.getSignedUrlPromise("getObject", params);
+
+  if (AWS_S3_ACCELERATE_URL) {
+    return url.replace(AWS_S3_UPLOAD_BUCKET_URL, AWS_S3_ACCELERATE_URL);
+  }
+
+  return url;
 };
 
 // function assumes that acl is private
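getSignedUrl now awaits getSignedUrlPromise, the promise-returning variant of the v2 SDK's getSignedUrl, and rewrites the host of the result when acceleration is configured. A small illustration of the rewrite with made-up values (both URLs and the key are hypothetical):

// Hypothetical values, for illustration only.
const bucketUrl = "https://s3.amazonaws.com/my-bucket";
const accelerateUrl = "https://my-bucket.s3-accelerate.amazonaws.com";
const signed = `${bucketUrl}/uploads/logo.png?X-Amz-Expires=60`;
// Swaps the configured bucket URL prefix, keeping path and query intact:
// "https://my-bucket.s3-accelerate.amazonaws.com/uploads/logo.png?X-Amz-Expires=60"
const accelerated = signed.replace(bucketUrl, accelerateUrl);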