outline/server/storage/files/S3Storage.ts

import path from "path";
import { Readable } from "stream";
import {
  S3Client,
  DeleteObjectCommand,
  GetObjectCommand,
  ObjectCannedACL,
} from "@aws-sdk/client-s3";
import { Upload } from "@aws-sdk/lib-storage";
import "@aws-sdk/signature-v4-crt"; // https://github.com/aws/aws-sdk-js-v3#functionality-requiring-aws-common-runtime-crt
import {
  PresignedPostOptions,
  createPresignedPost,
} from "@aws-sdk/s3-presigned-post";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";
import fs from "fs-extra";
import invariant from "invariant";
import compact from "lodash/compact";
import tmp from "tmp";
import env from "@server/env";
import Logger from "@server/logging/Logger";
import BaseStorage from "./BaseStorage";

export default class S3Storage extends BaseStorage {
  constructor() {
    super();

    this.client = new S3Client({
      forcePathStyle: env.AWS_S3_FORCE_PATH_STYLE,
      credentials: {
        accessKeyId: env.AWS_ACCESS_KEY_ID || "",
        secretAccessKey: env.AWS_SECRET_ACCESS_KEY || "",
      },
      region: env.AWS_REGION,
      endpoint: this.getEndpoint(),
    });
  }
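
  /**
   * Returns a presigned POST policy that allows a client to upload a file
   * directly to the bucket, constrained by maximum size, content type, and
   * ACL. The policy expires after one hour.
   */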
  public async getPresignedPost(
    key: string,
    acl: string,
    maxUploadSize: number,
    contentType = "image"
  ) {
    const params: PresignedPostOptions = {
      Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME as string,
      Key: key,
      Conditions: compact([
        ["content-length-range", 0, maxUploadSize],
        ["starts-with", "$Content-Type", contentType],
        ["starts-with", "$Cache-Control", ""],
      ]),
      Fields: {
        "Content-Disposition": this.getContentDisposition(contentType),
        key,
        acl,
      },
      Expires: 3600,
    };

    return createPresignedPost(this.client, params);
  }
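
  /**
   * Builds the public-facing endpoint for the bucket, preferring the
   * accelerate URL when one is configured.
   */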
  private getPublicEndpoint(isServerUpload?: boolean) {
    if (env.AWS_S3_ACCELERATE_URL) {
      return env.AWS_S3_ACCELERATE_URL;
    }
    invariant(
      env.AWS_S3_UPLOAD_BUCKET_NAME,
      "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );

    // lose trailing slash if there is one and convert fake-s3 url to localhost
    // for access outside of docker containers in local development
    const isDocker = env.AWS_S3_UPLOAD_BUCKET_URL.match(/http:\/\/s3:/);
    const host = env.AWS_S3_UPLOAD_BUCKET_URL.replace(
      "s3:",
      "localhost:"
    ).replace(/\/$/, "");

    // support old path-style S3 uploads and new virtual host uploads by checking
    // for the bucket name in the endpoint url before appending.
    const isVirtualHost = host.includes(env.AWS_S3_UPLOAD_BUCKET_NAME);

    if (isVirtualHost) {
      return host;
    }

    return `${host}/${isServerUpload && isDocker ? "s3/" : ""}${
      env.AWS_S3_UPLOAD_BUCKET_NAME
    }`;
  }
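
  /**
   * Returns the base URL that uploads should be sent to.
   *
   * @param isServerUpload Whether the upload originates on the server rather
   *   than in the user's browser.
   */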
  public getUploadUrl(isServerUpload?: boolean) {
    return this.getPublicEndpoint(isServerUpload);
  }
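
  /**
   * Returns the public URL for the object stored at the given key.
   */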
  public getUrlForKey(key: string): string {
    return `${this.getPublicEndpoint()}/${key}`;
  }
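
  /**
   * Uploads the given body to the bucket under `key` using a managed upload,
   * and returns the public URL of the stored object.
   */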
  public store = async ({
    body,
    contentLength,
    contentType,
    key,
    acl,
  }: {
    body: Buffer | Uint8Array | string | Readable;
    contentLength?: number;
    contentType?: string;
    key: string;
    acl?: string;
  }) => {
    invariant(
      env.AWS_S3_UPLOAD_BUCKET_NAME,
      "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );

    const upload = new Upload({
      client: this.client,
      params: {
        ACL: acl as ObjectCannedACL,
        Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
        Key: key,
        ContentType: contentType,
        ContentLength: contentLength,
        ContentDisposition: this.getContentDisposition(contentType),
        Body: body,
      },
    });
    await upload.done();

    const endpoint = this.getPublicEndpoint(true);
    return `${endpoint}/${key}`;
  };
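
  /**
   * Permanently deletes the object stored at the given key.
   */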
  public async deleteFile(key: string) {
    invariant(
      env.AWS_S3_UPLOAD_BUCKET_NAME,
      "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );

    await this.client.send(
      new DeleteObjectCommand({
        Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
        Key: key,
      })
    );
  }
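
  /**
   * Returns a time-limited signed URL for reading the object at `key`. In
   * local Docker development the unsigned public endpoint is returned
   * instead.
   */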
  public getSignedUrl = async (
    key: string,
    expiresIn = S3Storage.defaultSignedUrlExpires
  ) => {
    const isDocker = env.AWS_S3_UPLOAD_BUCKET_URL.match(/http:\/\/s3:/);
    const params = {
      Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
      Key: key,
    };

    if (isDocker) {
      return `${this.getPublicEndpoint()}/${key}`;
    } else {
      const command = new GetObjectCommand(params);
      // In SDK v3 the expiry is passed to getSignedUrl rather than included
      // in the command input as it was with the v2 `Expires` parameter.
      const url = await getSignedUrl(this.client, command, { expiresIn });

      if (env.AWS_S3_ACCELERATE_URL) {
        return url.replace(
          env.AWS_S3_UPLOAD_BUCKET_URL,
          env.AWS_S3_ACCELERATE_URL
        );
      }

      return url;
    }
  };
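
  /**
   * Downloads the object at `key` to a temporary file on disk and resolves
   * with its path plus a cleanup function that removes the file.
   */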
  public getFileHandle(key: string): Promise<{
    path: string;
    cleanup: () => Promise<void>;
  }> {
    return new Promise((resolve, reject) => {
      tmp.dir((err, tmpDir) => {
        if (err) {
          return reject(err);
        }
        const tmpFile = path.join(tmpDir, "tmp");
        const dest = fs.createWriteStream(tmpFile);
        dest.on("error", reject);
        dest.on("finish", () =>
          resolve({ path: tmpFile, cleanup: () => fs.rm(tmpFile) })
        );

        void this.getFileStream(key).then((stream) => {
          if (!stream) {
            return reject(new Error("No stream available"));
          }

          stream
            .on("error", (err) => {
              dest.end();
              reject(err);
            })
            .pipe(dest);
        });
      });
    });
  }
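
  /**
   * Returns a readable stream of the object at `key`, or null if the object
   * could not be fetched.
   */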
  public getFileStream(key: string): Promise<NodeJS.ReadableStream | null> {
    invariant(
      env.AWS_S3_UPLOAD_BUCKET_NAME,
      "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );

    return this.client
      .send(
        new GetObjectCommand({
          Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
          Key: key,
        })
      )
      .then((item) => item.Body as NodeJS.ReadableStream)
      .catch((err) => {
        Logger.error("Error getting file stream from S3 ", err, {
          key,
        });
        return null;
      });
  }

  private client: S3Client;

  private getEndpoint() {
    if (env.AWS_S3_ACCELERATE_URL) {
      return env.AWS_S3_ACCELERATE_URL;
    }

    // support old path-style S3 uploads and new virtual host uploads by
    // checking for the bucket name in the endpoint url.
    if (env.AWS_S3_UPLOAD_BUCKET_NAME) {
      const url = new URL(env.AWS_S3_UPLOAD_BUCKET_URL);
      if (url.hostname.startsWith(env.AWS_S3_UPLOAD_BUCKET_NAME + ".")) {
        return undefined;
      }
    }

    return env.AWS_S3_UPLOAD_BUCKET_URL;
  }
}
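
// Usage sketch (illustrative, not part of the upstream file): assuming the
// AWS_* environment variables read above are configured, a caller might store
// an object and then read it back via a signed URL. The key and file names
// here are made up for the example.
//
//   const storage = new S3Storage();
//   const publicUrl = await storage.store({
//     body: await fs.readFile("avatar.png"),
//     contentType: "image/png",
//     key: "uploads/user-1/avatar.png",
//     acl: "public-read",
//   });
//   const signedUrl = await storage.getSignedUrl("uploads/user-1/avatar.png");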