chore: Refactor file storage (#5711)
server/storage/files/BaseStorage.ts (new file, 99 lines)
@@ -0,0 +1,99 @@
import { Readable } from "stream";
import { PresignedPost } from "aws-sdk/clients/s3";

export default abstract class BaseStorage {
  /**
   * Returns a presigned post for uploading files to the storage provider.
   *
   * @param key The path to store the file at
   * @param acl The ACL to use
   * @param maxUploadSize The maximum upload size in bytes
   * @param contentType The content type of the file
   * @returns The presigned post object to use on the client (TODO: Abstract away from S3)
   */
  public abstract getPresignedPost(
    key: string,
    acl: string,
    maxUploadSize: number,
    contentType: string
  ): Promise<PresignedPost>;

  /**
   * Returns a stream for reading a file from the storage provider.
   *
   * @param key The path to the file
   */
  public abstract getFileStream(key: string): NodeJS.ReadableStream | null;

  /**
   * Returns a buffer of a file from the storage provider.
   *
   * @param key The path to the file
   */
  public abstract getFileBuffer(key: string): Promise<Blob>;

  /**
   * Returns the public endpoint for the storage provider.
   *
   * @param isServerUpload Whether the upload is happening on the server or not
   * @returns The public endpoint as a string
   */
  public abstract getPublicEndpoint(isServerUpload?: boolean): string;

  /**
   * Returns a signed URL for a file from the storage provider.
   *
   * @param key The path to the file
   * @param expiresIn An optional number of seconds until the URL expires
   */
  public abstract getSignedUrl(
    key: string,
    expiresIn?: number
  ): Promise<string>;

  /**
   * Upload a file to the storage provider.
   *
   * @param body The file body
   * @param contentLength The content length of the file
   * @param contentType The content type of the file
   * @param key The path to store the file at
   * @param acl The ACL to use
   * @returns The URL of the file
   */
  public abstract upload({
    body,
    contentLength,
    contentType,
    key,
    acl,
  }: {
    body: Buffer | Uint8Array | Blob | string | Readable;
    contentLength: number;
    contentType: string;
    key: string;
    acl: string;
  }): Promise<string | undefined>;

  /**
   * Upload a file to the storage provider directly from a remote URL.
   *
   * @param url The URL to upload from
   * @param key The path to store the file at
   * @param acl The ACL to use
   * @returns The URL of the file
   */
  public abstract uploadFromUrl(
    url: string,
    key: string,
    acl: string
  ): Promise<string | undefined>;

  /**
   * Delete a file from the storage provider.
   *
   * @param key The path to the file
   * @returns A promise that resolves when the file is deleted
   */
  public abstract deleteFile(key: string): Promise<void>;
}
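The abstract class above is the whole storage contract. As a rough illustration of how a second provider could satisfy it, here is a hypothetical local-disk implementation. This is a sketch only, not part of this commit; the /var/lib/storage root, the Node 18+ global fetch, and the unsupported-presign behavior are all assumptions:

import { createReadStream, promises as fs } from "fs";
import path from "path";
import { Readable } from "stream";
import { PresignedPost } from "aws-sdk/clients/s3";
import BaseStorage from "./BaseStorage";

// Hypothetical provider, shown only to illustrate the BaseStorage contract.
class LocalStorage extends BaseStorage {
  // Presigned posts are an S3 concept, which is why the base class notes a
  // TODO to abstract this return type away from S3.
  public getPresignedPost(): Promise<PresignedPost> {
    return Promise.reject(new Error("Not supported by local storage"));
  }

  public getFileStream(key: string) {
    return createReadStream(this.path(key));
  }

  public async getFileBuffer(key: string) {
    // Cast mirrors the Blob-typed contract on BaseStorage.getFileBuffer.
    return (await fs.readFile(this.path(key))) as unknown as Blob;
  }

  public getPublicEndpoint() {
    return "/files";
  }

  public async getSignedUrl(key: string) {
    // Local files need no signing; return a plain path.
    return `${this.getPublicEndpoint()}/${key}`;
  }

  public async upload({
    body,
    key,
  }: {
    body: Buffer | Uint8Array | Blob | string | Readable;
    contentLength: number;
    contentType: string;
    key: string;
    acl: string;
  }) {
    // fs.writeFile accepts strings, buffers, and streams in modern Node.
    await fs.writeFile(this.path(key), body as Buffer | string | Readable);
    return `${this.getPublicEndpoint()}/${key}`;
  }

  public async uploadFromUrl(url: string, key: string, acl: string) {
    // Assumes the global fetch available in Node 18+.
    const res = await fetch(url);
    const buffer = Buffer.from(await res.arrayBuffer());
    return this.upload({
      body: buffer,
      contentLength: buffer.length,
      contentType: res.headers.get("content-type") ?? "application/octet-stream",
      key,
      acl,
    });
  }

  public async deleteFile(key: string) {
    await fs.rm(this.path(key));
  }

  private path(key: string) {
    return path.join("/var/lib/storage", key);
  }
}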
server/storage/files/S3Storage.ts (new file, 254 lines)
@@ -0,0 +1,254 @@
import util from "util";
import AWS, { S3 } from "aws-sdk";
import fetch from "fetch-with-proxy";
import invariant from "invariant";
import compact from "lodash/compact";
import { useAgent } from "request-filtering-agent";
import env from "@server/env";
import Logger from "@server/logging/Logger";
import BaseStorage from "./BaseStorage";

export default class S3Storage extends BaseStorage {
  constructor() {
    super();

    this.client = new AWS.S3({
      s3BucketEndpoint: env.AWS_S3_ACCELERATE_URL ? true : undefined,
      s3ForcePathStyle: env.AWS_S3_FORCE_PATH_STYLE,
      accessKeyId: env.AWS_ACCESS_KEY_ID,
      secretAccessKey: env.AWS_SECRET_ACCESS_KEY,
      region: env.AWS_REGION,
      endpoint: this.getEndpoint(),
      signatureVersion: "v4",
    });
  }

  public async getPresignedPost(
    key: string,
    acl: string,
    maxUploadSize: number,
    contentType = "image"
  ) {
    const params = {
      Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
      Conditions: compact([
        ["content-length-range", 0, maxUploadSize],
        ["starts-with", "$Content-Type", contentType],
        ["starts-with", "$Cache-Control", ""],
      ]),
      Fields: {
        "Content-Disposition": "attachment",
        key,
        acl,
      },
      Expires: 3600,
    };

    return util.promisify(this.client.createPresignedPost).bind(this.client)(
      params
    );
  }

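  // Illustration (hypothetical, not part of this commit): the PresignedPost
  // returned above is consumed on the client by posting its fields plus the
  // file to post.url as multipart/form-data, roughly:
  //
  //   const post = await storage.getPresignedPost(key, "private", 1e7, "image");
  //   const form = new FormData();
  //   Object.entries(post.fields).forEach(([k, v]) => form.append(k, v));
  //   form.append("file", file);
  //   await fetch(post.url, { method: "POST", body: form });
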
  public getPublicEndpoint(isServerUpload?: boolean) {
    if (env.AWS_S3_ACCELERATE_URL) {
      return env.AWS_S3_ACCELERATE_URL;
    }
    invariant(
      env.AWS_S3_UPLOAD_BUCKET_NAME,
      "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );

    // Lose the trailing slash if there is one, and convert the fake-s3 url
    // to localhost for access outside of docker containers in local
    // development.
    const isDocker = env.AWS_S3_UPLOAD_BUCKET_URL.match(/http:\/\/s3:/);

    const host = env.AWS_S3_UPLOAD_BUCKET_URL.replace(
      "s3:",
      "localhost:"
    ).replace(/\/$/, "");

    // Support old path-style S3 uploads and new virtual-host uploads by
    // checking for the bucket name in the endpoint url before appending.
    const isVirtualHost = host.includes(env.AWS_S3_UPLOAD_BUCKET_NAME);

    if (isVirtualHost) {
      return host;
    }

    return `${host}/${isServerUpload && isDocker ? "s3/" : ""}${
      env.AWS_S3_UPLOAD_BUCKET_NAME
    }`;
  }

  public upload = async ({
    body,
    contentLength,
    contentType,
    key,
    acl,
  }: {
    body: S3.Body;
    contentLength: number;
    contentType: string;
    key: string;
    acl: string;
  }) => {
    invariant(
      env.AWS_S3_UPLOAD_BUCKET_NAME,
      "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );

    await this.client
      .putObject({
        ACL: acl,
        Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
        Key: key,
        ContentType: contentType,
        ContentLength: contentLength,
        ContentDisposition: "attachment",
        Body: body,
      })
      .promise();
    const endpoint = this.getPublicEndpoint(true);
    return `${endpoint}/${key}`;
  };

  public async uploadFromUrl(url: string, key: string, acl: string) {
    invariant(
      env.AWS_S3_UPLOAD_BUCKET_NAME,
      "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );

    const endpoint = this.getPublicEndpoint(true);
    if (url.startsWith("/api") || url.startsWith(endpoint)) {
      return;
    }

    try {
      const res = await fetch(url, {
        agent: useAgent(url),
      });
      const buffer = await res.buffer();

      // fetch-with-proxy returns a node-fetch style response whose headers
      // are a Headers object, so values must be read with .get() rather than
      // bracket indexing (which would always yield undefined).
      const contentType = res.headers.get("content-type") ?? undefined;
      const contentLength = res.headers.get("content-length");

      await this.client
        .putObject({
          ACL: acl,
          Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
          Key: key,
          ContentType: contentType,
          ContentLength: contentLength
            ? parseInt(contentLength, 10)
            : buffer.length,
          ContentDisposition: "attachment",
          Body: buffer,
        })
        .promise();
      return `${endpoint}/${key}`;
    } catch (err) {
      Logger.error("Error uploading to S3 from URL", err, {
        url,
        key,
        acl,
      });
      return;
    }
  }

  public async deleteFile(key: string) {
    invariant(
      env.AWS_S3_UPLOAD_BUCKET_NAME,
      "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );

    await this.client
      .deleteObject({
        Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
        Key: key,
      })
      .promise();
  }

  public getSignedUrl = async (key: string, expiresIn = 60) => {
    const isDocker = env.AWS_S3_UPLOAD_BUCKET_URL.match(/http:\/\/s3:/);
    const params = {
      Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
      Key: key,
      Expires: expiresIn,
      ResponseContentDisposition: "attachment",
    };

    const url = isDocker
      ? `${this.getPublicEndpoint()}/${key}`
      : await this.client.getSignedUrlPromise("getObject", params);

    if (env.AWS_S3_ACCELERATE_URL) {
      return url.replace(
        env.AWS_S3_UPLOAD_BUCKET_URL,
        env.AWS_S3_ACCELERATE_URL
      );
    }

    return url;
  };

  public getFileStream(key: string) {
    invariant(
      env.AWS_S3_UPLOAD_BUCKET_NAME,
      "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );

    try {
      return this.client
        .getObject({
          Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
          Key: key,
        })
        .createReadStream();
    } catch (err) {
      Logger.error("Error getting file stream from S3", err, {
        key,
      });
    }

    return null;
  }

  public async getFileBuffer(key: string) {
    invariant(
      env.AWS_S3_UPLOAD_BUCKET_NAME,
      "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );

    const response = await this.client
      .getObject({
        Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
        Key: key,
      })
      .promise();

    if (response.Body) {
      return response.Body as Blob;
    }

    throw new Error("Error getting file buffer from S3");
  }

  private client: AWS.S3;

  private getEndpoint() {
    if (env.AWS_S3_ACCELERATE_URL) {
      return env.AWS_S3_ACCELERATE_URL;
    }

    // Support old path-style S3 uploads and new virtual-host uploads by
    // checking for the bucket name in the endpoint url.
    if (
      env.AWS_S3_UPLOAD_BUCKET_NAME &&
      env.AWS_S3_FORCE_PATH_STYLE === false
    ) {
      const url = new URL(env.AWS_S3_UPLOAD_BUCKET_URL);
      if (url.hostname.startsWith(env.AWS_S3_UPLOAD_BUCKET_NAME + ".")) {
        return undefined;
      }
    }

    return new AWS.Endpoint(env.AWS_S3_UPLOAD_BUCKET_URL);
  }
}
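To make the endpoint logic above concrete, here is a trace of getEndpoint and getPublicEndpoint for two common configurations. The env values are hypothetical examples, not defaults from this commit:

// Virtual-host style bucket (AWS_S3_FORCE_PATH_STYLE=false):
//   AWS_S3_UPLOAD_BUCKET_URL  = "https://mybucket.s3.amazonaws.com"
//   AWS_S3_UPLOAD_BUCKET_NAME = "mybucket"
// getEndpoint() returns undefined so the SDK derives the endpoint itself,
// and getPublicEndpoint() returns the host unchanged because it already
// contains the bucket name:
//   getPublicEndpoint()     => "https://mybucket.s3.amazonaws.com"

// Local development against a fake-s3 container:
//   AWS_S3_UPLOAD_BUCKET_URL  = "http://s3:4569"
//   AWS_S3_UPLOAD_BUCKET_NAME = "uploads"
// The "s3:" host is rewritten to "localhost:" for access from outside
// docker, and server-side uploads are routed through the "s3/" prefix:
//   getPublicEndpoint()     => "http://localhost:4569/uploads"
//   getPublicEndpoint(true) => "http://localhost:4569/s3/uploads"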
server/storage/files/__mocks__/index.ts (new file, 9 lines)
@@ -0,0 +1,9 @@
export default {
  upload: jest.fn().mockReturnValue("/endpoint/key"),

  getPublicEndpoint: jest.fn().mockReturnValue("http://mock"),

  getSignedUrl: jest.fn().mockReturnValue("http://s3mock"),

  getPresignedPost: jest.fn().mockReturnValue({}),
};
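A sketch of how a test could consume this manual mock. The spec itself and the @server path alias (inferred from the imports in this diff) are assumptions for illustration:

import fileStorage from "@server/storage/files";

jest.mock("@server/storage/files");

it("uploads the attachment and returns the mocked URL", async () => {
  const url = await fileStorage.upload({
    body: Buffer.from("hello"),
    contentLength: 5,
    contentType: "text/plain",
    key: "uploads/hello.txt",
    acl: "private",
  });

  // Values come straight from the manual mock above.
  expect(url).toEqual("/endpoint/key");
  expect(fileStorage.upload).toHaveBeenCalledTimes(1);
});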
server/storage/files/index.ts (new file, 3 lines)
@@ -0,0 +1,3 @@
import S3Storage from "./S3Storage";

export default new S3Storage();
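Callers import this shared instance rather than constructing a provider themselves; a minimal hypothetical example:

import fileStorage from "@server/storage/files";

async function attachmentUrl(key: string) {
  // Signed URL; expiresIn defaults to 60 seconds when omitted.
  return fileStorage.getSignedUrl(key, 300);
}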