Migrate from s3 sdk v2 to v3 (#6731)
* chore: migrate from s3 sdk v2 to v3
* import signature-v4-crt
* downgrade minor version
* Add s3-presigned-post manually
* Change s3 mock
* Update server/storage/files/S3Storage.ts
* docs
* Upgrade aws-sdk

---------

Co-authored-by: Tom Moor <tom.moor@gmail.com>
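The diff below covers server/storage/files/S3Storage.ts. The core pattern change: the v2 SDK exposed every operation as a method on a monolithic AWS.S3 client and resolved requests with .promise(); v3 ships one command class per operation, dispatched through a generic client.send(). A minimal before/after sketch (bucket and key are placeholders; assumes an async context):

// v2: operation methods on a monolithic client, resolved via .promise()
// const data = await s3.getObject({ Bucket: "my-bucket", Key: "a.txt" }).promise();

// v3: one command class per operation, dispatched through client.send()
import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });
const data = await client.send(
  new GetObjectCommand({ Bucket: "my-bucket", Key: "a.txt" })
);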
@@ -1,6 +1,18 @@
 import path from "path";
-import util from "util";
-import AWS, { S3 } from "aws-sdk";
+import { Readable } from "stream";
+import {
+  S3Client,
+  DeleteObjectCommand,
+  GetObjectCommand,
+  ObjectCannedACL,
+} from "@aws-sdk/client-s3";
+import { Upload } from "@aws-sdk/lib-storage";
+import "@aws-sdk/signature-v4-crt"; // https://github.com/aws/aws-sdk-js-v3#functionality-requiring-aws-common-runtime-crt
+import {
+  PresignedPostOptions,
+  createPresignedPost,
+} from "@aws-sdk/s3-presigned-post";
+import { getSignedUrl } from "@aws-sdk/s3-request-presigner";
 import fs from "fs-extra";
 import invariant from "invariant";
 import compact from "lodash/compact";
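Because v3 is split into scoped packages, the single aws-sdk dependency is replaced by several: @aws-sdk/client-s3 for the client and commands, @aws-sdk/lib-storage for managed uploads, @aws-sdk/s3-presigned-post and @aws-sdk/s3-request-presigner for presigning, plus @aws-sdk/signature-v4-crt (the linked README explains the CRT requirement).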
@@ -13,14 +25,14 @@ export default class S3Storage extends BaseStorage {
   constructor() {
     super();
 
-    this.client = new AWS.S3({
-      s3BucketEndpoint: env.AWS_S3_ACCELERATE_URL ? true : undefined,
-      s3ForcePathStyle: env.AWS_S3_FORCE_PATH_STYLE,
-      accessKeyId: env.AWS_ACCESS_KEY_ID,
-      secretAccessKey: env.AWS_SECRET_ACCESS_KEY,
+    this.client = new S3Client({
+      forcePathStyle: env.AWS_S3_FORCE_PATH_STYLE,
+      credentials: {
+        accessKeyId: env.AWS_ACCESS_KEY_ID || "",
+        secretAccessKey: env.AWS_SECRET_ACCESS_KEY || "",
+      },
       region: env.AWS_REGION,
       endpoint: this.getEndpoint(),
-      signatureVersion: "v4",
     });
   }
 
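Two details of the new constructor worth noting: v3 signs with Signature V4 by default, so the explicit signatureVersion option disappears, and static credentials move into a nested credentials object. For illustration, the same shape pointed at an S3-compatible service (all values here are hypothetical; the real code reads them from env):

import { S3Client } from "@aws-sdk/client-s3";

const client = new S3Client({
  region: "us-east-1",
  endpoint: "http://localhost:9000", // hypothetical local MinIO endpoint
  forcePathStyle: true, // path-style addressing, per AWS_S3_FORCE_PATH_STYLE
  credentials: {
    accessKeyId: "minioadmin", // placeholder credentials
    secretAccessKey: "minioadmin",
  },
});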
@@ -30,8 +42,9 @@ export default class S3Storage extends BaseStorage {
     maxUploadSize: number,
     contentType = "image"
   ) {
-    const params = {
-      Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
+    const params: PresignedPostOptions = {
+      Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME as string,
+      Key: key,
       Conditions: compact([
         ["content-length-range", 0, maxUploadSize],
         ["starts-with", "$Content-Type", contentType],
@@ -45,9 +58,7 @@ export default class S3Storage extends BaseStorage {
       Expires: 3600,
     };
 
-    return util.promisify(this.client.createPresignedPost).bind(this.client)(
-      params
-    );
+    return createPresignedPost(this.client, params);
   }
 
   private getPublicEndpoint(isServerUpload?: boolean) {
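In v3, createPresignedPost is a standalone async function that takes the client as its first argument, which removes the util.promisify wrapper entirely. A sketch of how a browser upload might consume the result (placeholder bucket and key; client and fileBlob are stand-ins declared for the sketch):

import { S3Client } from "@aws-sdk/client-s3";
import { createPresignedPost } from "@aws-sdk/s3-presigned-post";

declare const client: S3Client; // the client built in the constructor
declare const fileBlob: Blob; // stand-in for the file being uploaded

const { url, fields } = await createPresignedPost(client, {
  Bucket: "my-bucket", // placeholder bucket and key
  Key: "uploads/avatar.png",
  Conditions: [["content-length-range", 0, 10 * 1024 * 1024]],
  Expires: 3600, // seconds until the policy expires
});

// POST a multipart form containing every returned field plus the file itself.
const form = new FormData();
Object.entries(fields).forEach(([name, value]) => form.append(name, value));
form.append("file", fileBlob);
await fetch(url, { method: "POST", body: form });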
@@ -96,7 +107,7 @@ export default class S3Storage extends BaseStorage {
     key,
     acl,
   }: {
-    body: S3.Body;
+    body: Buffer | Uint8Array | string | Readable;
     contentLength?: number;
     contentType?: string;
     key: string;
@@ -107,17 +118,20 @@ export default class S3Storage extends BaseStorage {
       "AWS_S3_UPLOAD_BUCKET_NAME is required"
     );
 
-    await this.client
-      .putObject({
-        ACL: acl,
+    const upload = new Upload({
+      client: this.client,
+      params: {
+        ACL: acl as ObjectCannedACL,
         Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
         Key: key,
         ContentType: contentType,
         ContentLength: contentLength,
         ContentDisposition: this.getContentDisposition(contentType),
         Body: body,
-      })
-      .promise();
+      },
+    });
+    await upload.done();
 
     const endpoint = this.getPublicEndpoint(true);
     return `${endpoint}/${key}`;
   };
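putObject(...).promise() becomes the Upload helper from @aws-sdk/lib-storage, which switches to multipart uploads for large bodies and accepts Node streams, so the Readable member of the widened body type above can be uploaded without buffering. A sketch (bucket, key, and path are placeholders):

import fs from "fs-extra";
import { S3Client } from "@aws-sdk/client-s3";
import { Upload } from "@aws-sdk/lib-storage";

declare const client: S3Client; // reusing the configured client

const upload = new Upload({
  client,
  params: {
    Bucket: "my-bucket",
    Key: "uploads/large-file.bin",
    Body: fs.createReadStream("/tmp/large-file.bin"), // no in-memory buffering
  },
});

// lib-storage emits progress events per transferred part.
upload.on("httpUploadProgress", (progress) => {
  console.log(progress.loaded, progress.total);
});

await upload.done();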
@@ -128,12 +142,12 @@ export default class S3Storage extends BaseStorage {
       "AWS_S3_UPLOAD_BUCKET_NAME is required"
     );
 
-    await this.client
-      .deleteObject({
+    await this.client.send(
+      new DeleteObjectCommand({
         Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
         Key: key,
       })
-      .promise();
+    );
   }
 
   public getSignedUrl = async (
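The delete path is a direct translation to the send/command pattern. S3 deletes are idempotent (deleting a missing key still returns success), so failures here usually mean permissions; v3 surfaces them as typed errors, sketched below with placeholder names:

import {
  S3Client,
  DeleteObjectCommand,
  S3ServiceException,
} from "@aws-sdk/client-s3";

declare const client: S3Client;

try {
  await client.send(
    new DeleteObjectCommand({ Bucket: "my-bucket", Key: "old/file.txt" })
  );
} catch (err) {
  if (err instanceof S3ServiceException) {
    // err.name carries the S3 error code, e.g. "AccessDenied".
    console.error(err.name, err.$metadata.httpStatusCode);
  }
  throw err;
}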
@@ -147,18 +161,21 @@ export default class S3Storage extends BaseStorage {
       Expires: expiresIn,
     };
 
-    const url = isDocker
-      ? `${this.getPublicEndpoint()}/${key}`
-      : await this.client.getSignedUrlPromise("getObject", params);
-
-    if (env.AWS_S3_ACCELERATE_URL) {
-      return url.replace(
-        env.AWS_S3_UPLOAD_BUCKET_URL,
-        env.AWS_S3_ACCELERATE_URL
-      );
-    }
-
-    return url;
+    if (isDocker) {
+      return `${this.getPublicEndpoint()}/${key}`;
+    } else {
+      const command = new GetObjectCommand(params);
+      const url = await getSignedUrl(this.client, command);
+
+      if (env.AWS_S3_ACCELERATE_URL) {
+        return url.replace(
+          env.AWS_S3_UPLOAD_BUCKET_URL,
+          env.AWS_S3_ACCELERATE_URL
+        );
+      }
+
+      return url;
+    }
   };
 
   public getFileHandle(key: string): Promise<{
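One subtlety in the presigned-URL hunk: v2 read the lifetime from params.Expires, but v3's getSignedUrl takes it as a separate options argument, and an Expires property left on the GetObjectCommand input does not control the presign lifetime. The explicit v3 form, sketched with placeholders:

import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";

declare const client: S3Client;

const command = new GetObjectCommand({ Bucket: "my-bucket", Key: "a.txt" });
// expiresIn is in seconds and defaults to 900 (15 minutes) when omitted.
const url = await getSignedUrl(client, command, { expiresIn: 3600 });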
@@ -177,44 +194,46 @@ export default class S3Storage extends BaseStorage {
           resolve({ path: tmpFile, cleanup: () => fs.rm(tmpFile) })
         );
 
-        const stream = this.getFileStream(key);
-        if (!stream) {
-          return reject(new Error("No stream available"));
-        }
+        void this.getFileStream(key).then((stream) => {
+          if (!stream) {
+            return reject(new Error("No stream available"));
+          }
 
-        stream
-          .on("error", (err) => {
-            dest.end();
-            reject(err);
-          })
-          .pipe(dest);
+          stream
+            .on("error", (err) => {
+              dest.end();
+              reject(err);
+            })
+            .pipe(dest);
+        });
       });
     });
   }
 
-  public getFileStream(key: string) {
+  public getFileStream(key: string): Promise<NodeJS.ReadableStream | null> {
     invariant(
       env.AWS_S3_UPLOAD_BUCKET_NAME,
       "AWS_S3_UPLOAD_BUCKET_NAME is required"
    );
 
-    try {
-      return this.client
-        .getObject({
+    return this.client
+      .send(
+        new GetObjectCommand({
           Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
           Key: key,
         })
-        .createReadStream();
-    } catch (err) {
-      Logger.error("Error getting file stream from S3 ", err, {
-        key,
-      });
-    }
+      )
+      .then((item) => item.Body as NodeJS.ReadableStream)
+      .catch((err) => {
+        Logger.error("Error getting file stream from S3 ", err, {
+          key,
+        });
 
-    return null;
+        return null;
+      });
   }
 
-  private client: AWS.S3;
+  private client: S3Client;
 
   private getEndpoint() {
     if (env.AWS_S3_ACCELERATE_URL) {
@@ -230,6 +249,6 @@ export default class S3Storage extends BaseStorage {
       }
     }
 
-    return new AWS.Endpoint(env.AWS_S3_UPLOAD_BUCKET_URL);
+    return env.AWS_S3_UPLOAD_BUCKET_URL;
   }
 }
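A final note on getFileStream: in v3, GetObjectCommandOutput.Body is a runtime-dependent union (a Node Readable on the server, a ReadableStream or Blob elsewhere), which is why the code above casts it. Consuming it in Node might look like this sketch (bucket, key, and path are placeholders):

import { Readable } from "stream";
import { createWriteStream } from "fs";
import { pipeline } from "stream/promises";
import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

declare const client: S3Client;

const res = await client.send(
  new GetObjectCommand({ Bucket: "my-bucket", Key: "a.txt" })
);

// In Node the Body is a Readable; stream it to disk without buffering.
await pipeline(res.Body as Readable, createWriteStream("/tmp/a.txt"));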