Migrate from S3 SDK v2 to v3 (#6731)
* chore: migrate from s3 sdk v2 to v3
* import signature-v4-crt
* downgrade minor version
* Add s3-presigned-post manually
* Change s3 mock
* Update server/storage/files/S3Storage.ts
* docs
* Upgrade aws-sdk

Co-authored-by: Tom Moor <tom.moor@gmail.com>
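For orientation, a minimal sketch (not code from this commit) of the calling convention the migration adopts: v2 exposes client methods that return `.promise()`, while v3 passes a Command object to `client.send()` and ships each service as its own package. The bucket and key values below are placeholders.

// Minimal sketch of the v2 -> v3 calling convention; values are placeholders.
import { S3Client, HeadObjectCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

export async function objectExists(bucket: string, key: string): Promise<boolean> {
  try {
    // v2 equivalent: await s3.headObject({ Bucket: bucket, Key: key }).promise();
    await client.send(new HeadObjectCommand({ Bucket: bucket, Key: key }));
    return true;
  } catch {
    return false;
  }
}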
@@ -47,6 +47,11 @@
     "> 0.25%, not dead"
   ],
   "dependencies": {
+    "@aws-sdk/client-s3": "3.577.0",
+    "@aws-sdk/lib-storage": "3.577.0",
+    "@aws-sdk/s3-presigned-post": "3.577.0",
+    "@aws-sdk/s3-request-presigner": "3.577.0",
+    "@aws-sdk/signature-v4-crt": "^3.577.0",
     "@babel/core": "^7.23.7",
     "@babel/plugin-proposal-decorators": "^7.23.2",
     "@babel/plugin-transform-destructuring": "^7.23.3",
@@ -84,7 +89,6 @@
     "@vitejs/plugin-react": "^3.1.0",
     "addressparser": "^1.0.1",
     "autotrack": "^2.4.1",
-    "aws-sdk": "^2.1550.0",
     "babel-plugin-styled-components": "^2.1.4",
     "babel-plugin-transform-class-properties": "^6.24.1",
     "body-scroll-lock": "^4.0.0-beta.0",
@@ -84,7 +84,7 @@ router.get(
         "application/octet-stream"
       );
       ctx.attachment(fileName);
-      ctx.body = FileStorage.getFileStream(key);
+      ctx.body = await FileStorage.getFileStream(key);
     } else {
       const attachment = await Attachment.findOne({
         where: { key },
@@ -4,7 +4,6 @@ import env from "./env";
 
 import "./logging/tracer"; // must come before importing any instrumented module
 
-import maintenance from "aws-sdk/lib/maintenance_mode_message";
 import http from "http";
 import https from "https";
 import Koa from "koa";
@@ -28,9 +27,6 @@ import RedisAdapter from "./storage/redis";
 import Metrics from "./logging/Metrics";
 import { PluginManager } from "./utils/PluginManager";
 
-// Suppress the AWS maintenance message until upgrade to v3.
-maintenance.suppress = true;
-
 // The number of processes to run, defaults to the number of CPU's available
 // for the web service, and 1 for collaboration during the beta period.
 let webProcessCount = env.WEB_CONCURRENCY;
@@ -1,6 +1,6 @@
 import { Blob } from "buffer";
 import { Readable } from "stream";
-import { PresignedPost } from "aws-sdk/clients/s3";
+import { PresignedPost } from "@aws-sdk/s3-presigned-post";
 import { isBase64Url } from "@shared/utils/urls";
 import env from "@server/env";
 import Logger from "@server/logging/Logger";
@@ -27,11 +27,13 @@ export default abstract class BaseStorage {
   ): Promise<Partial<PresignedPost>>;
 
   /**
-   * Returns a stream for reading a file from the storage provider.
+   * Returns a promise that resolves with a stream for reading a file from the storage provider.
    *
    * @param key The path to the file
    */
-  public abstract getFileStream(key: string): NodeJS.ReadableStream | null;
+  public abstract getFileStream(
+    key: string
+  ): Promise<NodeJS.ReadableStream | null>;
 
   /**
    * Returns the upload URL for the storage provider.
@@ -96,12 +98,13 @@ export default abstract class BaseStorage {
   }>;
 
   /**
-   * Returns a buffer of a file from the storage provider.
+   * Returns a promise that resolves to a buffer of a file from the storage provider.
    *
    * @param key The path to the file
+   * @returns A promise that resolves with the file buffer
    */
   public async getFileBuffer(key: string) {
-    const stream = this.getFileStream(key);
+    const stream = await this.getFileStream(key);
     return new Promise<Buffer>((resolve, reject) => {
       const chunks: Buffer[] = [];
       if (!stream) {
@@ -132,7 +132,7 @@ export default class LocalStorage extends BaseStorage {
   }
 
   public getFileStream(key: string) {
-    return fs.createReadStream(this.getFilePath(key));
+    return Promise.resolve(fs.createReadStream(this.getFilePath(key)));
   }
 
   private getFilePath(key: string) {
@@ -1,6 +1,18 @@
 import path from "path";
-import util from "util";
-import AWS, { S3 } from "aws-sdk";
+import { Readable } from "stream";
+import {
+  S3Client,
+  DeleteObjectCommand,
+  GetObjectCommand,
+  ObjectCannedACL,
+} from "@aws-sdk/client-s3";
+import { Upload } from "@aws-sdk/lib-storage";
+import "@aws-sdk/signature-v4-crt"; // https://github.com/aws/aws-sdk-js-v3#functionality-requiring-aws-common-runtime-crt
+import {
+  PresignedPostOptions,
+  createPresignedPost,
+} from "@aws-sdk/s3-presigned-post";
+import { getSignedUrl } from "@aws-sdk/s3-request-presigner";
 import fs from "fs-extra";
 import invariant from "invariant";
 import compact from "lodash/compact";
@@ -13,14 +25,14 @@ export default class S3Storage extends BaseStorage {
   constructor() {
     super();
 
-    this.client = new AWS.S3({
-      s3BucketEndpoint: env.AWS_S3_ACCELERATE_URL ? true : undefined,
-      s3ForcePathStyle: env.AWS_S3_FORCE_PATH_STYLE,
-      accessKeyId: env.AWS_ACCESS_KEY_ID,
-      secretAccessKey: env.AWS_SECRET_ACCESS_KEY,
+    this.client = new S3Client({
+      forcePathStyle: env.AWS_S3_FORCE_PATH_STYLE,
+      credentials: {
+        accessKeyId: env.AWS_ACCESS_KEY_ID || "",
+        secretAccessKey: env.AWS_SECRET_ACCESS_KEY || "",
+      },
       region: env.AWS_REGION,
       endpoint: this.getEndpoint(),
-      signatureVersion: "v4",
     });
   }
 
@@ -30,8 +42,9 @@ export default class S3Storage extends BaseStorage {
     maxUploadSize: number,
     contentType = "image"
   ) {
-    const params = {
-      Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
+    const params: PresignedPostOptions = {
+      Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME as string,
+      Key: key,
       Conditions: compact([
         ["content-length-range", 0, maxUploadSize],
         ["starts-with", "$Content-Type", contentType],
@@ -45,9 +58,7 @@ export default class S3Storage extends BaseStorage {
       Expires: 3600,
     };
 
-    return util.promisify(this.client.createPresignedPost).bind(this.client)(
-      params
-    );
+    return createPresignedPost(this.client, params);
   }
 
   private getPublicEndpoint(isServerUpload?: boolean) {
@@ -96,7 +107,7 @@ export default class S3Storage extends BaseStorage {
     key,
     acl,
   }: {
-    body: S3.Body;
+    body: Buffer | Uint8Array | string | Readable;
     contentLength?: number;
     contentType?: string;
     key: string;
@@ -107,17 +118,20 @@ export default class S3Storage extends BaseStorage {
       "AWS_S3_UPLOAD_BUCKET_NAME is required"
     );
 
-    await this.client
-      .putObject({
-        ACL: acl,
+    const upload = new Upload({
+      client: this.client,
+      params: {
+        ACL: acl as ObjectCannedACL,
         Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
         Key: key,
        ContentType: contentType,
        ContentLength: contentLength,
        ContentDisposition: this.getContentDisposition(contentType),
        Body: body,
-      })
-      .promise();
+      },
+    });
+    await upload.done();
 
     const endpoint = this.getPublicEndpoint(true);
     return `${endpoint}/${key}`;
   };
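The hunk above swaps `putObject(...).promise()` for the `Upload` helper from `@aws-sdk/lib-storage`. As a hedged, general-purpose sketch of that helper (not this repo's code; bucket, key, and file path are placeholders), a streaming upload with progress reporting typically looks like this:

// Hedged sketch of @aws-sdk/lib-storage's Upload helper; values are placeholders.
import { createReadStream } from "fs";
import { S3Client } from "@aws-sdk/client-s3";
import { Upload } from "@aws-sdk/lib-storage";

export async function uploadFile(
  client: S3Client,
  bucket: string,
  key: string,
  filePath: string
) {
  const upload = new Upload({
    client,
    params: { Bucket: bucket, Key: key, Body: createReadStream(filePath) },
  });

  // Upload splits large bodies into multipart uploads and reports progress.
  upload.on("httpUploadProgress", (progress) => {
    console.log(`uploaded ${progress.loaded ?? 0} of ${progress.total ?? "?"} bytes`);
  });

  return upload.done();
}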
@@ -128,12 +142,12 @@ export default class S3Storage extends BaseStorage {
       "AWS_S3_UPLOAD_BUCKET_NAME is required"
     );
 
-    await this.client
-      .deleteObject({
+    await this.client.send(
+      new DeleteObjectCommand({
         Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
         Key: key,
       })
-      .promise();
+    );
   }
 
   public getSignedUrl = async (
@@ -147,18 +161,21 @@ export default class S3Storage extends BaseStorage {
       Expires: expiresIn,
     };
 
-    const url = isDocker
-      ? `${this.getPublicEndpoint()}/${key}`
-      : await this.client.getSignedUrlPromise("getObject", params);
+    if (isDocker) {
+      return `${this.getPublicEndpoint()}/${key}`;
+    } else {
+      const command = new GetObjectCommand(params);
+      const url = await getSignedUrl(this.client, command);
 
-    if (env.AWS_S3_ACCELERATE_URL) {
-      return url.replace(
-        env.AWS_S3_UPLOAD_BUCKET_URL,
-        env.AWS_S3_ACCELERATE_URL
-      );
+      if (env.AWS_S3_ACCELERATE_URL) {
+        return url.replace(
+          env.AWS_S3_UPLOAD_BUCKET_URL,
+          env.AWS_S3_ACCELERATE_URL
+        );
+      }
+
+      return url;
     }
-
-    return url;
   };
 
   public getFileHandle(key: string): Promise<{
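For context on `getSignedUrl` above: in v3 the presigner accepts the expiry as an option on the call rather than as an `Expires` field on the command input. A minimal sketch of that pattern (placeholder bucket, key, and default expiry; not code from this commit):

// Hedged sketch of the v3 presigner's expiresIn option; values are placeholders.
import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";

export async function signedDownloadUrl(
  client: S3Client,
  bucket: string,
  key: string,
  expiresIn = 3600
): Promise<string> {
  const command = new GetObjectCommand({ Bucket: bucket, Key: key });
  return getSignedUrl(client, command, { expiresIn });
}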
@@ -177,44 +194,46 @@ export default class S3Storage extends BaseStorage {
         resolve({ path: tmpFile, cleanup: () => fs.rm(tmpFile) })
       );
 
-      const stream = this.getFileStream(key);
-      if (!stream) {
-        return reject(new Error("No stream available"));
-      }
+      void this.getFileStream(key).then((stream) => {
+        if (!stream) {
+          return reject(new Error("No stream available"));
+        }
 
-      stream
-        .on("error", (err) => {
-          dest.end();
-          reject(err);
-        })
-        .pipe(dest);
+        stream
+          .on("error", (err) => {
+            dest.end();
+            reject(err);
+          })
+          .pipe(dest);
+        });
       });
     });
   }
 
-  public getFileStream(key: string) {
+  public getFileStream(key: string): Promise<NodeJS.ReadableStream | null> {
     invariant(
       env.AWS_S3_UPLOAD_BUCKET_NAME,
       "AWS_S3_UPLOAD_BUCKET_NAME is required"
     );
 
-    try {
-      return this.client
-        .getObject({
+    return this.client
+      .send(
+        new GetObjectCommand({
           Bucket: env.AWS_S3_UPLOAD_BUCKET_NAME,
           Key: key,
         })
-        .createReadStream();
-    } catch (err) {
-      Logger.error("Error getting file stream from S3 ", err, {
-        key,
-      });
-    }
+      )
+      .then((item) => item.Body as NodeJS.ReadableStream)
+      .catch((err) => {
+        Logger.error("Error getting file stream from S3 ", err, {
+          key,
+        });
 
-    return null;
+        return null;
+      });
   }
 
-  private client: AWS.S3;
+  private client: S3Client;
 
   private getEndpoint() {
     if (env.AWS_S3_ACCELERATE_URL) {
@@ -230,6 +249,6 @@ export default class S3Storage extends BaseStorage {
       }
     }
 
-    return new AWS.Endpoint(env.AWS_S3_UPLOAD_BUCKET_URL);
+    return env.AWS_S3_UPLOAD_BUCKET_URL;
   }
 }
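For context on the `.then((item) => item.Body as NodeJS.ReadableStream)` cast above: in Node, a v3 `GetObjectCommand` response exposes `Body` as a readable stream, and the SDK also mixes in transform helpers for small objects. A hedged sketch, independent of this repo (bucket and key are placeholders):

// Hedged sketch of reading a GetObject response body with SDK v3 in Node.
import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

export async function readObjectAsString(
  client: S3Client,
  bucket: string,
  key: string
): Promise<string | undefined> {
  const response = await client.send(
    new GetObjectCommand({ Bucket: bucket, Key: key })
  );
  // Body is a streaming payload in Node; transformToString buffers it in memory.
  return response.Body?.transformToString("utf-8");
}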
@@ -11,18 +11,28 @@ jest.mock("bull");
 jest.mock("../queues");
 
 // We never want to make real S3 requests in test environment
-jest.mock("aws-sdk", () => {
-  const mS3 = {
-    createPresignedPost: jest.fn(),
-    putObject: jest.fn().mockReturnThis(),
-    deleteObject: jest.fn().mockReturnThis(),
-    promise: jest.fn(),
-  };
-  return {
-    S3: jest.fn(() => mS3),
-    Endpoint: jest.fn(),
-  };
-});
+jest.mock("@aws-sdk/client-s3", () => ({
+  S3Client: jest.fn(() => ({
+    send: jest.fn(),
+  })),
+  DeleteObjectCommand: jest.fn(),
+  GetObjectCommand: jest.fn(),
+  ObjectCannedACL: {},
+}));
+
+jest.mock("@aws-sdk/lib-storage", () => ({
+  Upload: jest.fn(() => ({
+    done: jest.fn(),
+  })),
+}));
+
+jest.mock("@aws-sdk/s3-presigned-post", () => ({
+  createPresignedPost: jest.fn(),
+}));
+
+jest.mock("@aws-sdk/s3-request-presigner", () => ({
+  getSignedUrl: jest.fn(),
+}));
 
 afterAll(() => Redis.defaultClient.disconnect());
 
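A hedged sketch of how a test might lean on these module mocks. The `S3Storage` import path and the `getPresignedPost` arguments below are assumptions inferred from this diff, not code taken from the commit:

// Hedged sketch of asserting against the mocked presigner; paths and values are assumptions.
import { createPresignedPost } from "@aws-sdk/s3-presigned-post";
import S3Storage from "@server/storage/files/S3Storage";

it("passes the object key through to createPresignedPost", async () => {
  const storage = new S3Storage();
  await storage.getPresignedPost("uploads/avatar.png", "private", 1000, "image");

  // jest.mocked gives a typed handle on the mocked module function.
  expect(jest.mocked(createPresignedPost)).toHaveBeenCalledWith(
    expect.anything(),
    expect.objectContaining({ Key: "uploads/avatar.png" })
  );
});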
server/typings/index.d.ts (vendored)
@@ -19,10 +19,3 @@ declare module "@joplin/turndown-plugin-gfm" {
   export const taskListItems: Plugin;
   export const gfm: Plugin;
 }
-
-declare module "aws-sdk/lib/maintenance_mode_message" {
-  const maintenance: {
-    suppress: boolean;
-  };
-  export default maintenance;
-}