/**
 * S3 Storage Adapter
 *
 * Implements StoragePort using AWS S3
 */

import { Injectable, Logger } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import {
  S3Client,
  PutObjectCommand,
  GetObjectCommand,
  DeleteObjectCommand,
  HeadObjectCommand,
  ListObjectsV2Command,
} from '@aws-sdk/client-s3';
import { getSignedUrl } from '@aws-sdk/s3-request-presigner';
import {
  StoragePort,
  UploadOptions,
  DownloadOptions,
  DeleteOptions,
  StorageObject,
} from '../../domain/ports/out/storage.port';

@Injectable()
export class S3StorageAdapter implements StoragePort {
  private readonly logger = new Logger(S3StorageAdapter.name);
  private s3Client: S3Client;

  constructor(private readonly configService: ConfigService) {
    this.initializeS3Client();
  }

  private initializeS3Client(): void {
    const region = this.configService.get<string>('AWS_REGION', 'us-east-1');
    const endpoint = this.configService.get<string>('AWS_S3_ENDPOINT');
    const accessKeyId = this.configService.get<string>('AWS_ACCESS_KEY_ID');
    const secretAccessKey = this.configService.get<string>('AWS_SECRET_ACCESS_KEY');

    // Check if S3/MinIO is configured
    const isConfigured = endpoint || (accessKeyId && secretAccessKey);

    if (!isConfigured) {
      this.logger.warn(
        'S3 Storage adapter is NOT configured (no endpoint or credentials). Storage operations will fail. ' +
          'Set AWS_S3_ENDPOINT for MinIO or AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY for AWS S3.'
      );
      // Don't initialize client if not configured
      return;
    }

    this.s3Client = new S3Client({
      region,
      endpoint,
      credentials:
        accessKeyId && secretAccessKey
          ? {
              accessKeyId,
              secretAccessKey,
            }
          : undefined,
      forcePathStyle: !!endpoint, // Required for MinIO
    });

    this.logger.log(
      `S3 Storage adapter initialized with region: ${region}${
        endpoint ? ` (endpoint: ${endpoint})` : ''
      }`
    );
  }
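
  /*
   * Illustrative configuration sketch (sample values only, not from this repo —
   * adjust for your environment). Only the variable names are taken from the code above.
   *
   *   # MinIO / custom endpoint (path-style addressing is forced)
   *   AWS_S3_ENDPOINT=http://localhost:9000
   *   AWS_ACCESS_KEY_ID=minioadmin
   *   AWS_SECRET_ACCESS_KEY=minioadmin
   *   AWS_REGION=us-east-1
   *
   *   # AWS S3 (omit AWS_S3_ENDPOINT; virtual-hosted-style URLs are built instead)
   *   AWS_ACCESS_KEY_ID=AKIA...
   *   AWS_SECRET_ACCESS_KEY=...
   *   AWS_REGION=us-east-1
   */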

  async upload(options: UploadOptions): Promise<StorageObject> {
    if (!this.s3Client) {
      throw new Error('S3 Storage is not configured. Set AWS_S3_ENDPOINT or AWS credentials in .env');
    }

    try {
      const command = new PutObjectCommand({
        Bucket: options.bucket,
        Key: options.key,
        Body: options.body,
        ContentType: options.contentType,
        Metadata: options.metadata,
        // ACL is deprecated in favor of bucket policies
      });

      await this.s3Client.send(command);

      const url = this.buildUrl(options.bucket, options.key);
      const size =
        typeof options.body === 'string' ? Buffer.byteLength(options.body) : options.body.length;

      this.logger.log(`Uploaded file to S3: ${options.key}`);

      return {
        key: options.key,
        url,
        size,
        contentType: options.contentType,
      };
    } catch (error) {
      this.logger.error(`Failed to upload file to S3: ${options.key}`, error);
      throw error;
    }
  }

  async download(options: DownloadOptions): Promise<Buffer> {
    try {
      const command = new GetObjectCommand({
        Bucket: options.bucket,
        Key: options.key,
      });

      const response = await this.s3Client.send(command);
      const stream = response.Body as any;

      const chunks: Uint8Array[] = [];
      for await (const chunk of stream) {
        chunks.push(chunk);
      }

      this.logger.log(`Downloaded file from S3: ${options.key}`);
      return Buffer.concat(chunks);
    } catch (error) {
      this.logger.error(`Failed to download file from S3: ${options.key}`, error);
      throw error;
    }
  }

  async delete(options: DeleteOptions): Promise<void> {
    try {
      const command = new DeleteObjectCommand({
        Bucket: options.bucket,
        Key: options.key,
      });

      await this.s3Client.send(command);
      this.logger.log(`Deleted file from S3: ${options.key}`);
    } catch (error) {
      this.logger.error(`Failed to delete file from S3: ${options.key}`, error);
      throw error;
    }
  }

  async getSignedUrl(options: DownloadOptions, expiresIn: number = 3600): Promise<string> {
    try {
      const command = new GetObjectCommand({
        Bucket: options.bucket,
        Key: options.key,
      });

      const url = await getSignedUrl(this.s3Client, command, { expiresIn });
      this.logger.log(`Generated signed URL for: ${options.key} (expires in ${expiresIn}s)`);
      return url;
    } catch (error) {
      this.logger.error(`Failed to generate signed URL for: ${options.key}`, error);
      throw error;
    }
  }

  async exists(options: DownloadOptions): Promise<boolean> {
    try {
      const command = new HeadObjectCommand({
        Bucket: options.bucket,
        Key: options.key,
      });

      await this.s3Client.send(command);
      return true;
    } catch (error: any) {
      if (error.name === 'NotFound' || error.$metadata?.httpStatusCode === 404) {
        return false;
      }
      this.logger.error(`Error checking if file exists: ${options.key}`, error);
      throw error;
    }
  }

  async list(bucket: string, prefix?: string): Promise<StorageObject[]> {
    try {
      const command = new ListObjectsV2Command({
        Bucket: bucket,
        Prefix: prefix,
      });

      const response = await this.s3Client.send(command);
      const objects: StorageObject[] = [];

      if (response.Contents) {
        for (const item of response.Contents) {
          if (item.Key) {
            objects.push({
              key: item.Key,
              url: this.buildUrl(bucket, item.Key),
              size: item.Size || 0,
              lastModified: item.LastModified,
            });
          }
        }
      }

      this.logger.log(
        `Listed ${objects.length} objects from S3 bucket: ${bucket}${
          prefix ? ` with prefix: ${prefix}` : ''
        }`
      );
      return objects;
    } catch (error) {
      this.logger.error(`Failed to list objects from S3 bucket: ${bucket}`, error);
      throw error;
    }
  }

  private buildUrl(bucket: string, key: string): string {
    const endpoint = this.configService.get<string>('AWS_S3_ENDPOINT');
    const region = this.configService.get<string>('AWS_REGION', 'us-east-1');

    if (endpoint) {
      // MinIO or custom endpoint
      return `${endpoint}/${bucket}/${key}`;
    }

    // AWS S3
    return `https://${bucket}.s3.${region}.amazonaws.com/${key}`;
  }
}
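
/*
 * Example wiring and usage (illustrative sketch, not part of this file):
 * the adapter is typically registered against the StoragePort abstraction in a
 * NestJS module. The 'STORAGE_PORT' token, the StorageModule name, and the
 * './s3-storage.adapter' import path below are assumptions — use whatever the
 * domain layer actually exports.
 *
 *   import { Module } from '@nestjs/common';
 *   import { ConfigModule } from '@nestjs/config';
 *   import { S3StorageAdapter } from './s3-storage.adapter';
 *
 *   @Module({
 *     imports: [ConfigModule],
 *     providers: [{ provide: 'STORAGE_PORT', useClass: S3StorageAdapter }], // hypothetical token
 *     exports: ['STORAGE_PORT'],
 *   })
 *   export class StorageModule {}
 *
 * A consumer injected with the port could then, for example:
 *
 *   const object = await storage.upload({
 *     bucket: 'documents',
 *     key: 'reports/2024/report.pdf',
 *     body: fileBuffer,
 *     contentType: 'application/pdf',
 *   });
 *   const url = await storage.getSignedUrl({ bucket: 'documents', key: object.key }, 900);
 */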