Compare commits
No commits in common. "435d5875013552143f5a33ebf56e8357d0fe65d5" and "f5eabf48616ede4bf8618510ccffd092f8b280c7" have entirely different histories.
435d587501...f5eabf4861
@@ -6,26 +6,7 @@
       "Bash(npm run lint:*)",
       "Bash(npm run backend:lint)",
       "Bash(npm run backend:build:*)",
-      "Bash(npm run frontend:build:*)",
-      "Bash(rm:*)",
-      "Bash(git rm:*)",
-      "Bash(git add:*)",
-      "Bash(git commit:*)",
-      "Bash(git push:*)",
-      "Bash(npx tsc:*)",
-      "Bash(npx nest:*)",
-      "Read(//Users/david/Documents/xpeditis/**)",
-      "Bash(find:*)",
-      "Bash(npm test)",
-      "Bash(git checkout:*)",
-      "Bash(git reset:*)",
-      "Bash(curl:*)",
-      "Read(//private/tmp/**)",
-      "Bash(lsof:*)",
-      "Bash(awk:*)",
-      "Bash(xargs kill:*)",
-      "Read(//dev/**)",
-      "Bash(psql:*)"
+      "Bash(npm run frontend:build:*)"
     ],
     "deny": [],
     "ask": []
@@ -1,216 +0,0 @@
# 🔧 Fix Portainer Deployment Issues

## Identified Problems

### 1. ❌ Registry Mismatch (CRITICAL)
**Problem**: Portainer tries to pull the images from DockerHub instead of the Scaleway Registry.

**In `docker/portainer-stack.yml`:**
```yaml
# ❌ INCORRECT (line 77):
image: weworkstudio/xpeditis-backend:preprod

# ❌ INCORRECT (line 136):
image: weworkstudio/xpeditis-frontend:latest
```

**REQUIRED FIX:**
```yaml
# ✅ CORRECT:
image: rg.fr-par.scw.cloud/weworkstudio/xpeditis-backend:preprod

# ✅ CORRECT:
image: rg.fr-par.scw.cloud/weworkstudio/xpeditis-frontend:preprod
```

---

### 2. ❌ Incorrect Frontend Tag (CRITICAL)
**Problem**: Portainer asks for `:latest`, but CI/CD creates that tag ONLY if `preprod` is the default branch.

**REQUIRED FIX:**
```yaml
# Replace :latest with :preprod
image: rg.fr-par.scw.cloud/weworkstudio/xpeditis-frontend:preprod
```

---

### 3. ⚠️ S3 Bucket for CSV Rates
**Problem**: The backend code uses `xpeditis-csv-rates` by default, but Portainer configures `xpeditis-preprod-documents`.

**REQUIRED FIX in `portainer-stack.yml`:**
```yaml
environment:
  # Add this line:
  AWS_S3_BUCKET: xpeditis-preprod-documents
  # OR create a dedicated CSV bucket:
  AWS_S3_CSV_BUCKET: xpeditis-csv-rates
```

**Option 1 - Use the same bucket:**
No code change; just make sure `AWS_S3_BUCKET=xpeditis-preprod-documents` is actually set.

**Option 2 - Separate bucket for CSV (recommended):**
1. Create the `xpeditis-csv-rates` bucket in MinIO
2. Add `AWS_S3_CSV_BUCKET: xpeditis-csv-rates` to the environment variables
3. Update the backend code to use `AWS_S3_CSV_BUCKET` (a sketch follows this diff)

---

## 📝 Corrected File: portainer-stack.yml

```yaml
# Backend API (NestJS)
xpeditis-backend:
  image: rg.fr-par.scw.cloud/weworkstudio/xpeditis-backend:preprod # ← FIXED
  restart: unless-stopped
  environment:
    NODE_ENV: preprod
    PORT: 4000

    # Database
    DATABASE_HOST: xpeditis-db
    DATABASE_PORT: 5432
    DATABASE_USER: xpeditis
    DATABASE_PASSWORD: 9Lc3M9qoPBeHLKHDXGUf1
    DATABASE_NAME: xpeditis_preprod

    # Redis
    REDIS_HOST: xpeditis-redis
    REDIS_PORT: 6379
    REDIS_PASSWORD: hXiy5GMPswMtxMZujjS2O

    # JWT
    JWT_SECRET: 4C4tQC8qym/evv4zI5DaUE1yy3kilEnm6lApOGD0GgNBLA0BLm2tVyUr1Lr0mTnV

    # S3/MinIO
    AWS_S3_ENDPOINT: http://xpeditis-minio:9000
    AWS_REGION: us-east-1
    AWS_ACCESS_KEY_ID: minioadmin_preprod_CHANGE_ME
    AWS_SECRET_ACCESS_KEY: RBJfD0QVXC5JDfAHCwdUW
    AWS_S3_BUCKET: xpeditis-csv-rates # ← FIXED for CSV rates

    # CORS
    CORS_ORIGIN: https://app.preprod.xpeditis.com,https://www.preprod.xpeditis.com

    # App URLs
    FRONTEND_URL: https://app.preprod.xpeditis.com
    API_URL: https://api.preprod.xpeditis.com

  networks:
    - xpeditis_internal
    - traefik_network
  # ... labels unchanged ...

# Frontend (Next.js)
xpeditis-frontend:
  image: rg.fr-par.scw.cloud/weworkstudio/xpeditis-frontend:preprod # ← FIXED
  restart: unless-stopped
  environment:
    NODE_ENV: preprod
    NEXT_PUBLIC_API_URL: https://api.preprod.xpeditis.com
    NEXT_PUBLIC_WS_URL: wss://api.preprod.xpeditis.com
  networks:
    - traefik_network
  # ... labels unchanged ...
```

---

## 🚀 Deployment Steps

### 1. Check that the images exist in the Scaleway Registry

```bash
# Log in to the Scaleway registry
docker login rg.fr-par.scw.cloud/weworkstudio

# Check the available images (via the Scaleway Console)
# https://console.scaleway.com/registry
```

### 2. Update the Portainer stack

1. Open Portainer: https://portainer.ton-domaine.com
2. Go to **Stacks** → **xpeditis**
3. Click **Editor**
4. Replace lines 77 and 136 with the corrected images
5. **Deploy the stack** (or **Update the stack**)

### 3. Create the MinIO bucket for CSV

```bash
# Open the MinIO Console
# https://minio.preprod.xpeditis.com

# Log in with:
# User: minioadmin_preprod_CHANGE_ME
# Password: RBJfD0QVXC5JDfAHCwdUW

# Create the "xpeditis-csv-rates" bucket
# Settings → Public Access: Private
```

### 4. Verify the deployment

```bash
# Check the containers
docker ps | grep xpeditis

# Check the backend logs
docker logs xpeditis-backend -f --tail=100

# Check the frontend logs
docker logs xpeditis-frontend -f --tail=100
```

---

## 🐛 Debugging if it still does not work

### Check registry access

```bash
# Manually test pulling the images
docker pull rg.fr-par.scw.cloud/weworkstudio/xpeditis-backend:preprod
docker pull rg.fr-par.scw.cloud/weworkstudio/xpeditis-frontend:preprod
```

### Check that the tags exist

Look at GitHub Actions → latest run → Backend job:
```
Build and push Backend Docker image
tags: rg.fr-par.scw.cloud/weworkstudio/xpeditis-backend:preprod
```

### Common error: "manifest unknown"

If you see this error, the tag does not exist. Solutions:
1. Push manually to the `preprod` branch to trigger CI/CD
2. Check that the GitHub Actions workflow ran successfully
3. Check the `REGISTRY_TOKEN` secret in GitHub Settings

---

## 📋 Deployment Checklist

- [ ] Fix `portainer-stack.yml` lines 77 and 136 to use the Scaleway registry
- [ ] Change the frontend tag from `:latest` to `:preprod`
- [ ] Create the MinIO bucket `xpeditis-csv-rates`
- [ ] Update the stack in Portainer
- [ ] Check that the containers start correctly
- [ ] Test uploading a CSV file via the admin dashboard
- [ ] Check that the CSV appears in MinIO

---

## 🔐 Note on Credentials

The credentials in `portainer-stack.yml` include:
- Production passwords (PostgreSQL, Redis, MinIO)
- The production JWT secret
- MinIO access keys

**IMPORTANT**: Change these credentials IMMEDIATELY if this repo is public or accessible to third parties!
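The removed guide's Option 2 (step 3) calls for a backend change so the CSV path reads a dedicated `AWS_S3_CSV_BUCKET`. That change is not part of this comparison; below is a minimal sketch of what it could look like, reusing the `@nestjs/config` `ConfigService` call that appears verbatim elsewhere in this diff. The `CsvBucketResolver` class name is hypothetical.

```typescript
// Sketch for the removed guide's Option 2: prefer a dedicated AWS_S3_CSV_BUCKET
// and fall back to the general AWS_S3_BUCKET. The class name and its placement
// are assumptions, not part of the diff; the ConfigService usage mirrors the
// existing `this.configService.get<string>('AWS_S3_BUCKET', 'xpeditis-csv-rates')`.
import { Injectable } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';

@Injectable()
export class CsvBucketResolver {
  constructor(private readonly configService: ConfigService) {}

  resolveCsvBucket(): string {
    // A dedicated CSV bucket wins when defined; otherwise reuse the main bucket,
    // keeping 'xpeditis-csv-rates' as the backend's last-resort default.
    return (
      this.configService.get<string>('AWS_S3_CSV_BUCKET') ??
      this.configService.get<string>('AWS_S3_BUCKET', 'xpeditis-csv-rates')
    );
  }
}
```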
@@ -38,8 +38,6 @@ import {
   CsvFileValidationDto,
 } from '../../dto/csv-rate-upload.dto';
 import { CsvRateMapper } from '../../mappers/csv-rate.mapper';
-import { S3StorageAdapter } from '@infrastructure/storage/s3-storage.adapter';
-import { ConfigService } from '@nestjs/config';
 
 /**
  * CSV Rates Admin Controller
@@ -59,9 +57,7 @@ export class CsvRatesAdminController {
     private readonly csvLoader: CsvRateLoaderAdapter,
     private readonly csvConverter: CsvConverterService,
     private readonly csvConfigRepository: TypeOrmCsvRateConfigRepository,
-    private readonly csvRateMapper: CsvRateMapper,
-    private readonly s3Storage: S3StorageAdapter,
-    private readonly configService: ConfigService
+    private readonly csvRateMapper: CsvRateMapper
   ) {}
 
   /**
@@ -201,34 +197,6 @@ export class CsvRatesAdminController {
     fs.renameSync(filePathToValidate, finalPath);
     this.logger.log(`Renamed ${file.filename} to ${finalFilename}`);
 
-    // Upload CSV file to MinIO/S3
-    let minioObjectKey: string | null = null;
-    try {
-      const csvBuffer = fs.readFileSync(finalPath);
-      const bucket = this.configService.get<string>('AWS_S3_BUCKET', 'xpeditis-csv-rates');
-      const objectKey = `csv-rates/${finalFilename}`;
-
-      await this.s3Storage.upload({
-        bucket,
-        key: objectKey,
-        body: csvBuffer,
-        contentType: 'text/csv',
-        metadata: {
-          companyName: dto.companyName,
-          companyEmail: dto.companyEmail,
-          uploadedBy: user.email,
-          uploadedAt: new Date().toISOString(),
-        },
-      });
-
-      minioObjectKey = objectKey;
-      this.logger.log(`✅ CSV file uploaded to MinIO: ${bucket}/${objectKey}`);
-    } catch (error: any) {
-      this.logger.error(`⚠️ Failed to upload CSV to MinIO (will continue with local storage): ${error.message}`);
-      // Don't fail the entire operation if MinIO upload fails
-      // The file is still available locally
-    }
-
     // Check if config exists for this company
     const existingConfig = await this.csvConfigRepository.findByCompanyName(dto.companyName);
 
@@ -243,7 +211,6 @@ export class CsvRatesAdminController {
         metadata: {
           ...existingConfig.metadata,
           companyEmail: dto.companyEmail, // Store email in metadata
-          minioObjectKey, // Store MinIO object key
           lastUpload: {
             timestamp: new Date().toISOString(),
             by: user.email,
@@ -270,7 +237,6 @@ export class CsvRatesAdminController {
           uploadedBy: user.email,
          description: `${dto.companyName} shipping rates`,
           companyEmail: dto.companyEmail, // Store email in metadata
-          minioObjectKey, // Store MinIO object key
         },
       });
 
@@ -526,19 +492,6 @@ export class CsvRatesAdminController {
       this.logger.error(`Failed to delete file ${filePath}: ${error.message}`);
     }
 
-    // Delete from MinIO/S3 if it exists there
-    const minioObjectKey = config.metadata?.minioObjectKey as string | undefined;
-    if (minioObjectKey) {
-      try {
-        const bucket = this.configService.get<string>('AWS_S3_BUCKET', 'xpeditis-csv-rates');
-        await this.s3Storage.delete({ bucket, key: minioObjectKey });
-        this.logger.log(`✅ Deleted file from MinIO: ${bucket}/${minioObjectKey}`);
-      } catch (error: any) {
-        this.logger.error(`⚠️ Failed to delete file from MinIO: ${error.message}`);
-        // Don't fail the operation if MinIO deletion fails
-      }
-    }
-
     // Delete the configuration
     await this.csvConfigRepository.delete(config.companyName);
 
@@ -143,20 +143,7 @@ export class CsvRateSearchService implements SearchCsvRatesPort {
       // Pass company name from config to override CSV column value
       return this.csvRateLoader.loadRatesFromCsv(config.csvFilePath, email, config.companyName);
     });
-
-    // Use allSettled to handle missing files gracefully
-    const results = await Promise.allSettled(ratePromises);
-    const rateArrays = results
-      .filter((result): result is PromiseFulfilledResult<CsvRate[]> => result.status === 'fulfilled')
-      .map(result => result.value);
-
-    // Log any failed file loads
-    const failures = results.filter(result => result.status === 'rejected');
-    if (failures.length > 0) {
-      console.warn(`Failed to load ${failures.length} CSV files:`,
-        failures.map((f, idx) => `${configs[idx]?.csvFilePath}: ${(f as PromiseRejectedResult).reason}`));
-    }
+    const rateArrays = await Promise.all(ratePromises);
 
     return rateArrays.flat();
   }
@@ -165,13 +152,7 @@ export class CsvRateSearchService implements SearchCsvRatesPort {
     const ratePromises = files.map(file =>
       this.csvRateLoader.loadRatesFromCsv(file, 'bookings@example.com')
     );
-
-    // Use allSettled here too for consistency
-    const results = await Promise.allSettled(ratePromises);
-    const rateArrays = results
-      .filter((result): result is PromiseFulfilledResult<CsvRate[]> => result.status === 'fulfilled')
-      .map(result => result.value);
-
+    const rateArrays = await Promise.all(ratePromises);
     return rateArrays.flat();
   }
 
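The two hunks above swap the `Promise.allSettled`-based loading, which tolerated unreadable CSV files and logged them, for a plain `Promise.all`, which rejects the whole search as soon as one file fails to load. For reference, here is a standalone sketch of the tolerant pattern that was removed; the types and helper function are simplified and are not part of the service.

```typescript
// Standalone illustration of the removed pattern: load many CSV files, keep the
// ones that resolve, and log the ones that fail, instead of letting a single
// missing file reject the entire Promise.all. Types are simplified stand-ins.
type CsvRate = Record<string, string>;

async function loadAllTolerant(
  paths: string[],
  load: (p: string) => Promise<CsvRate[]>,
): Promise<CsvRate[]> {
  const results = await Promise.allSettled(paths.map((p) => load(p)));

  // Report failures without aborting the whole load.
  const failures = results
    .map((r, i) => ({ r, path: paths[i] }))
    .filter((x): x is { r: PromiseRejectedResult; path: string } => x.r.status === 'rejected');
  if (failures.length > 0) {
    console.warn(`Failed to load ${failures.length} CSV files:`,
      failures.map((f) => `${f.path}: ${f.r.reason}`));
  }

  // Keep only the fulfilled results and flatten them into one rate list.
  return results
    .filter((r): r is PromiseFulfilledResult<CsvRate[]> => r.status === 'fulfilled')
    .flatMap((r) => r.value);
}
```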
@@ -1,5 +1,4 @@
-import { Injectable, Logger, Optional } from '@nestjs/common';
-import { ConfigService } from '@nestjs/config';
+import { Injectable, Logger } from '@nestjs/common';
 import { parse } from 'csv-parse/sync';
 import * as fs from 'fs/promises';
 import * as path from 'path';
@@ -10,8 +9,6 @@ import { ContainerType } from '@domain/value-objects/container-type.vo';
 import { Money } from '@domain/value-objects/money.vo';
 import { Surcharge, SurchargeType, SurchargeCollection } from '@domain/value-objects/surcharge.vo';
 import { DateRange } from '@domain/value-objects/date-range.vo';
-import { S3StorageAdapter } from '@infrastructure/storage/s3-storage.adapter';
-import { TypeOrmCsvRateConfigRepository } from '@infrastructure/persistence/typeorm/repositories/typeorm-csv-rate-config.repository';
 
 /**
  * CSV Row Interface
@@ -66,11 +63,7 @@ export class CsvRateLoaderAdapter implements CsvRateLoaderPort {
     ['NVO Consolidation', 'nvo-consolidation.csv'],
   ]);
 
-  constructor(
-    @Optional() private readonly s3Storage?: S3StorageAdapter,
-    @Optional() private readonly configService?: ConfigService,
-    @Optional() private readonly csvConfigRepository?: TypeOrmCsvRateConfigRepository
-  ) {
+  constructor() {
     // CSV files are stored in infrastructure/storage/csv-storage/rates/
     // Use absolute path based on project root (works in both dev and production)
     // In production, process.cwd() points to the backend app directory
@@ -84,50 +77,18 @@ export class CsvRateLoaderAdapter implements CsvRateLoaderPort {
       'rates'
     );
     this.logger.log(`CSV directory initialized: ${this.csvDirectory}`);
-
-    if (this.s3Storage && this.configService) {
-      this.logger.log('✅ MinIO/S3 storage support enabled for CSV files');
-    }
   }
 
   async loadRatesFromCsv(filePath: string, companyEmail: string, companyNameOverride?: string): Promise<CsvRate[]> {
     this.logger.log(`Loading rates from CSV: ${filePath} (email: ${companyEmail}, company: ${companyNameOverride || 'from CSV'})`);
 
     try {
-      let fileContent: string;
-
-      // Try to load from MinIO first if configured
-      if (this.s3Storage && this.configService && this.csvConfigRepository && companyNameOverride) {
-        try {
-          const config = await this.csvConfigRepository.findByCompanyName(companyNameOverride);
-          const minioObjectKey = config?.metadata?.minioObjectKey as string | undefined;
-
-          if (minioObjectKey) {
-            const bucket = this.configService.get<string>('AWS_S3_BUCKET', 'xpeditis-csv-rates');
-            this.logger.log(`📥 Loading CSV from MinIO: ${bucket}/${minioObjectKey}`);
-
-            const buffer = await this.s3Storage.download({ bucket, key: minioObjectKey });
-            fileContent = buffer.toString('utf-8');
-            this.logger.log(`✅ Successfully loaded CSV from MinIO`);
-          } else {
-            // Fallback to local file
-            throw new Error('No MinIO object key found, using local file');
-          }
-        } catch (minioError: any) {
-          this.logger.warn(`⚠️ Failed to load from MinIO: ${minioError.message}. Falling back to local file.`);
-          // Fallback to local file system
-          const fullPath = path.isAbsolute(filePath)
-            ? filePath
-            : path.join(this.csvDirectory, filePath);
-          fileContent = await fs.readFile(fullPath, 'utf-8');
-        }
-      } else {
-        // Read from local file system
-        const fullPath = path.isAbsolute(filePath)
-          ? filePath
-          : path.join(this.csvDirectory, filePath);
-        fileContent = await fs.readFile(fullPath, 'utf-8');
-      }
-
+      // Read CSV file
+      const fullPath = path.isAbsolute(filePath)
+        ? filePath
+        : path.join(this.csvDirectory, filePath);
+      const fileContent = await fs.readFile(fullPath, 'utf-8');
+
       // Parse CSV
       const records: CsvRow[] = parse(fileContent, {
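The large hunk above removes the "MinIO first, local file as fallback" read path from `CsvRateLoaderAdapter` and keeps only the local read. A generic sketch of that fallback pattern, detached from the adapter, is below; `downloadFromStore` is a stand-in callback, not an API from this codebase.

```typescript
// Generic illustration of the removed load path: try a remote object store first,
// fall back to the local filesystem when no object key is recorded or the
// download fails. The downloadFromStore parameter is a hypothetical stand-in.
import { promises as fs } from 'fs';

async function readCsvWithFallback(
  localPath: string,
  objectKey: string | undefined,
  downloadFromStore: (key: string) => Promise<Buffer>,
): Promise<string> {
  if (objectKey) {
    try {
      const buffer = await downloadFromStore(objectKey);
      return buffer.toString('utf-8');
    } catch (err) {
      // Remote failure is non-fatal: fall through to the local copy.
      console.warn(`Remote load failed (${(err as Error).message}); falling back to ${localPath}`);
    }
  }
  // Local file is the source of truth when no remote copy is available.
  return fs.readFile(localPath, 'utf-8');
}
```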
@@ -1,6 +1,5 @@
 import { Module } from '@nestjs/common';
 import { TypeOrmModule } from '@nestjs/typeorm';
-import { ConfigModule } from '@nestjs/config';
 
 // Domain Services
 import { CsvRateSearchService } from '@domain/services/csv-rate-search.service';
@@ -9,7 +8,6 @@ import { CsvRateSearchService } from '@domain/services/csv-rate-search.service';
 import { CsvRateLoaderAdapter } from './csv-rate-loader.adapter';
 import { CsvConverterService } from './csv-converter.service';
 import { TypeOrmCsvRateConfigRepository } from '@infrastructure/persistence/typeorm/repositories/typeorm-csv-rate-config.repository';
-import { StorageModule } from '@infrastructure/storage/storage.module';
 
 // Application Layer
 import { CsvRateMapper } from '@application/mappers/csv-rate.mapper';
@@ -34,10 +32,6 @@ import { CsvRateConfigOrmEntity } from '@infrastructure/persistence/typeorm/enti
   imports: [
     // TypeORM entities
     TypeOrmModule.forFeature([CsvRateConfigOrmEntity]),
-    // Storage for MinIO/S3 support
-    StorageModule,
-    // Config for S3 configuration
-    ConfigModule,
   ],
   providers: [
     // Infrastructure Adapters (must be before services that depend on them)
@@ -12,12 +12,11 @@ import { STORAGE_PORT } from '@domain/ports/out/storage.port';
 @Module({
   imports: [ConfigModule],
   providers: [
-    S3StorageAdapter, // Add direct provider for dependency injection
     {
       provide: STORAGE_PORT,
       useClass: S3StorageAdapter,
     },
   ],
-  exports: [STORAGE_PORT, S3StorageAdapter], // Export both token and class
+  exports: [STORAGE_PORT],
 })
 export class StorageModule {}
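On the left-hand side, `StorageModule` registers `S3StorageAdapter` both under the `STORAGE_PORT` token and as a directly injectable class, and exports both; that is what allows the CSV controller and loader above to inject the adapter concretely while hexagonal consumers keep injecting the port. Below is a sketch of that wiring. Note it uses `useExisting` so the token and the class share a single instance, whereas the removed lines used a separate provider plus `useClass`, which creates a second instance of the adapter; the choice between the two is a design trade-off, not something this diff settles.

```typescript
// Variant sketch of the removed StorageModule wiring: expose the adapter both as
// the STORAGE_PORT token and as the concrete class. The relative adapter import
// path is assumed; STORAGE_PORT's path comes from the hunk header above.
import { Module } from '@nestjs/common';
import { ConfigModule } from '@nestjs/config';
import { STORAGE_PORT } from '@domain/ports/out/storage.port';
import { S3StorageAdapter } from './s3-storage.adapter'; // path assumed

@Module({
  imports: [ConfigModule],
  providers: [
    S3StorageAdapter,
    // useExisting aliases the token to the same S3StorageAdapter instance.
    { provide: STORAGE_PORT, useExisting: S3StorageAdapter },
  ],
  exports: [STORAGE_PORT, S3StorageAdapter],
})
export class StorageModule {}
```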
@@ -1,42 +0,0 @@
import { NestFactory } from '@nestjs/core';
import { AppModule } from '../app.module';
import { TypeOrmCsvRateConfigRepository } from '@infrastructure/persistence/typeorm/repositories/typeorm-csv-rate-config.repository';

/**
 * Script to delete orphaned CSV rate configuration
 * Usage: npm run ts-node src/scripts/delete-orphaned-csv-config.ts
 */
async function deleteOrphanedConfig() {
  const app = await NestFactory.createApplicationContext(AppModule);
  const repository = app.get(TypeOrmCsvRateConfigRepository);

  try {
    console.log('🔍 Searching for orphaned test.csv configuration...');

    const configs = await repository.findAll();
    const orphanedConfig = configs.find((c) => c.csvFilePath === 'test.csv');

    if (!orphanedConfig) {
      console.log('✅ No orphaned test.csv configuration found');
      await app.close();
      return;
    }

    console.log(`📄 Found orphaned config: ${orphanedConfig.companyName} - ${orphanedConfig.csvFilePath}`);
    console.log(`   ID: ${orphanedConfig.id}`);
    console.log(`   Uploaded: ${orphanedConfig.uploadedAt}`);

    // Delete the orphaned configuration
    await repository.delete(orphanedConfig.companyName);

    console.log('✅ Successfully deleted orphaned test.csv configuration');
  } catch (error: any) {
    console.error('❌ Error deleting orphaned config:', error.message);
    process.exit(1);
  }

  await app.close();
}

deleteOrphanedConfig();
@@ -1,118 +0,0 @@
import { NestFactory } from '@nestjs/core';
import { AppModule } from '../app.module';
import { S3StorageAdapter } from '@infrastructure/storage/s3-storage.adapter';
import { TypeOrmCsvRateConfigRepository } from '@infrastructure/persistence/typeorm/repositories/typeorm-csv-rate-config.repository';
import { ConfigService } from '@nestjs/config';
import * as fs from 'fs';
import * as path from 'path';

/**
 * Script to migrate existing CSV files to MinIO
 * Usage: npm run ts-node src/scripts/migrate-csv-to-minio.ts
 */
async function migrateCsvFilesToMinio() {
  const app = await NestFactory.createApplicationContext(AppModule);
  const s3Storage = app.get(S3StorageAdapter);
  const csvConfigRepository = app.get(TypeOrmCsvRateConfigRepository);
  const configService = app.get(ConfigService);

  try {
    console.log('🚀 Starting CSV migration to MinIO...\n');

    const bucket = configService.get<string>('AWS_S3_BUCKET', 'xpeditis-csv-rates');
    const csvDirectory = path.join(
      process.cwd(),
      'src',
      'infrastructure',
      'storage',
      'csv-storage',
      'rates'
    );

    // Get all CSV configurations
    const configs = await csvConfigRepository.findAll();
    console.log(`📋 Found ${configs.length} CSV configurations\n`);

    let migratedCount = 0;
    let skippedCount = 0;
    let errorCount = 0;

    for (const config of configs) {
      const filename = config.csvFilePath;
      const filePath = path.join(csvDirectory, filename);

      console.log(`📄 Processing: ${config.companyName} - ${filename}`);

      // Check if already in MinIO
      const existingMinioKey = config.metadata?.minioObjectKey as string | undefined;
      if (existingMinioKey) {
        console.log(`   ⏭️  Already in MinIO: ${existingMinioKey}`);
        skippedCount++;
        continue;
      }

      // Check if file exists locally
      if (!fs.existsSync(filePath)) {
        console.log(`   ⚠️  Local file not found: ${filePath}`);
        errorCount++;
        continue;
      }

      try {
        // Read local file
        const fileBuffer = fs.readFileSync(filePath);
        const objectKey = `csv-rates/${filename}`;

        // Upload to MinIO
        await s3Storage.upload({
          bucket,
          key: objectKey,
          body: fileBuffer,
          contentType: 'text/csv',
          metadata: {
            companyName: config.companyName,
            uploadedBy: 'migration-script',
            migratedAt: new Date().toISOString(),
          },
        });

        // Update configuration with MinIO object key
        await csvConfigRepository.update(config.id, {
          metadata: {
            ...config.metadata,
            minioObjectKey: objectKey,
            migratedToMinioAt: new Date().toISOString(),
          },
        });

        console.log(`   ✅ Uploaded to MinIO: ${bucket}/${objectKey}`);
        migratedCount++;
      } catch (error: any) {
        console.log(`   ❌ Error uploading ${filename}: ${error.message}`);
        errorCount++;
      }
    }

    console.log('\n' + '='.repeat(60));
    console.log('📊 Migration Summary:');
    console.log(`   ✅ Migrated: ${migratedCount}`);
    console.log(`   ⏭️  Skipped (already in MinIO): ${skippedCount}`);
    console.log(`   ❌ Errors: ${errorCount}`);
    console.log('='.repeat(60) + '\n');

    if (migratedCount > 0) {
      console.log('🎉 Migration completed successfully!');
    } else if (skippedCount === configs.length) {
      console.log('✅ All files are already in MinIO');
    } else {
      console.log('⚠️  Migration completed with errors');
    }
  } catch (error: any) {
    console.error('❌ Migration failed:', error.message);
    process.exit(1);
  }

  await app.close();
}

migrateCsvFilesToMinio();
@@ -8,9 +8,9 @@ services:
     volumes:
       - xpeditis_db_data:/var/lib/postgresql/data
     environment:
-      POSTGRES_DB: xpeditis_preprod
+      POSTGRES_DB: xpeditis_prod
       POSTGRES_USER: xpeditis
-      POSTGRES_PASSWORD: 9Lc3M9qoPBeHLKHDXGUf1
+      POSTGRES_PASSWORD: xpeditis_prod_password_CHANGE_ME
       PGDATA: /var/lib/postgresql/data/pgdata
     networks:
       - xpeditis_internal
@@ -23,18 +23,33 @@ services:
       placement:
         constraints:
           - node.role == manager
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
 
   # Redis Cache
   xpeditis-redis:
     image: redis:7-alpine
     restart: unless-stopped
-    command: redis-server --requirepass hXiy5GMPswMtxMZujjS2O --appendonly yes
+    command: redis-server --requirepass xpeditis_redis_password_CHANGE_ME --appendonly yes
     volumes:
       - xpeditis_redis_data:/data
     networks:
       - xpeditis_internal
     healthcheck:
       test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
+      interval: 10s
+      timeout: 3s
+      retries: 5
+    deploy:
+      placement:
+        constraints:
+          - node.role == manager
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
 
   # MinIO S3 Storage
   xpeditis-minio:
@@ -44,125 +59,245 @@ services:
     volumes:
       - xpeditis_minio_data:/data
     environment:
-      MINIO_ROOT_USER: minioadmin_preprod_CHANGE_ME
-      MINIO_ROOT_PASSWORD: RBJfD0QVXC5JDfAHCwdUW
+      MINIO_ROOT_USER: minioadmin_CHANGE_ME
+      MINIO_ROOT_PASSWORD: minioadmin_password_CHANGE_ME
     networks:
       - xpeditis_internal
       - traefik_network
     labels:
-      - "traefik.enable=true"
 
       # MinIO API
-      - "traefik.http.routers.xpeditis-minio-api.rule=Host(`s3.preprod.xpeditis.com`)"
+      - "traefik.enable=true"
+      - "traefik.http.routers.xpeditis-minio-api.rule=Host(`s3.xpeditis.com`)"
       - "traefik.http.routers.xpeditis-minio-api.entrypoints=websecure"
       - "traefik.http.routers.xpeditis-minio-api.tls=true"
       - "traefik.http.routers.xpeditis-minio-api.tls.certresolver=letsencrypt"
+      - "traefik.http.routers.xpeditis-minio-api.service=xpeditis-minio-api"
       - "traefik.http.services.xpeditis-minio-api.loadbalancer.server.port=9000"
 
       # MinIO Console
-      - "traefik.http.routers.xpeditis-minio-console.rule=Host(`minio.preprod.xpeditis.com`)"
+      - "traefik.http.routers.xpeditis-minio-console.rule=Host(`minio.xpeditis.com`)"
       - "traefik.http.routers.xpeditis-minio-console.entrypoints=websecure"
       - "traefik.http.routers.xpeditis-minio-console.tls=true"
      - "traefik.http.routers.xpeditis-minio-console.tls.certresolver=letsencrypt"
+      - "traefik.http.routers.xpeditis-minio-console.service=xpeditis-minio-console"
       - "traefik.http.services.xpeditis-minio-console.loadbalancer.server.port=9001"
 
-      # HTTP → HTTPS
-      - "traefik.http.routers.xpeditis-minio-http.rule=Host(`s3.preprod.xpeditis.com`) || Host(`minio.preprod.xpeditis.com`)"
-      - "traefik.http.middlewares.xpeditis-redirect.redirectscheme.scheme=https"
-
       - "traefik.docker.network=traefik_network"
+      # HTTP to HTTPS redirect
+      - "traefik.http.routers.xpeditis-minio-http.rule=Host(`s3.xpeditis.com`) || Host(`minio.xpeditis.com`)"
+      - "traefik.http.routers.xpeditis-minio-http.entrypoints=web"
+      - "traefik.http.routers.xpeditis-minio-http.middlewares=xpeditis-redirect"
+      - "traefik.http.middlewares.xpeditis-redirect.redirectscheme.scheme=https"
+      - "traefik.http.middlewares.xpeditis-redirect.redirectscheme.permanent=true"
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
+      interval: 30s
+      timeout: 20s
+      retries: 3
+    deploy:
+      placement:
+        constraints:
+          - node.role == manager
+      restart_policy:
+        condition: on-failure
+        delay: 5s
+        max_attempts: 3
 
   # Backend API (NestJS)
   xpeditis-backend:
-    image: rg.fr-par.scw.cloud/weworkstudio/xpeditis-backend:preprod
+    image: xpeditis/backend:latest
     restart: unless-stopped
     environment:
-      NODE_ENV: preprod
+      # Node Environment
+      NODE_ENV: production
       PORT: 4000
 
       # Database
       DATABASE_HOST: xpeditis-db
       DATABASE_PORT: 5432
       DATABASE_USER: xpeditis
-      DATABASE_PASSWORD: 9Lc3M9qoPBeHLKHDXGUf1
-      DATABASE_NAME: xpeditis_preprod
+      DATABASE_PASSWORD: xpeditis_prod_password_CHANGE_ME
+      DATABASE_NAME: xpeditis_prod
+      DATABASE_SSL: "false"
+      DATABASE_SYNC: "false"
+      DATABASE_LOGGING: "false"
 
       # Redis
       REDIS_HOST: xpeditis-redis
       REDIS_PORT: 6379
-      REDIS_PASSWORD: hXiy5GMPswMtxMZujjS2O
+      REDIS_PASSWORD: xpeditis_redis_password_CHANGE_ME
+      REDIS_TTL: 900
 
       # JWT
-      JWT_SECRET: 4C4tQC8qym/evv4zI5DaUE1yy3kilEnm6lApOGD0GgNBLA0BLm2tVyUr1Lr0mTnV
+      JWT_SECRET: your-super-secret-jwt-key-CHANGE_ME-min-32-characters
+      JWT_ACCESS_EXPIRATION: 15m
+      JWT_REFRESH_EXPIRATION: 7d
 
       # S3/MinIO
       AWS_S3_ENDPOINT: http://xpeditis-minio:9000
       AWS_REGION: us-east-1
-      AWS_ACCESS_KEY_ID: minioadmin_preprod_CHANGE_ME
-      AWS_SECRET_ACCESS_KEY: RBJfD0QVXC5JDfAHCwdUW
-      AWS_S3_BUCKET: xpeditis-csv-rates
+      AWS_ACCESS_KEY_ID: minioadmin_CHANGE_ME
+      AWS_SECRET_ACCESS_KEY: minioadmin_password_CHANGE_ME
+      AWS_S3_BUCKET: xpeditis-documents
+      AWS_S3_FORCE_PATH_STYLE: "true"
 
       # CORS
-      CORS_ORIGIN: https://app.preprod.xpeditis.com,https://www.preprod.xpeditis.com
+      CORS_ORIGIN: https://app.xpeditis.com,https://www.xpeditis.com
 
+      # Rate Limiting
+      RATE_LIMIT_TTL: 60
+      RATE_LIMIT_MAX: 100
+
+      # Email (Placeholder - configure based on your email provider)
+      EMAIL_HOST: smtp.example.com
+      EMAIL_PORT: 587
+      EMAIL_USER: noreply@xpeditis.com
+      EMAIL_PASSWORD: email_password_CHANGE_ME
+      EMAIL_FROM: "Xpeditis <noreply@xpeditis.com>"
+
+      # Sentry (Optional - for error tracking)
+      SENTRY_DSN: ""
+      SENTRY_ENVIRONMENT: production
+      SENTRY_TRACES_SAMPLE_RATE: 0.1
+
       # App URLs
-      FRONTEND_URL: https://app.preprod.xpeditis.com
-      API_URL: https://api.preprod.xpeditis.com
+      FRONTEND_URL: https://app.xpeditis.com
+      API_URL: https://api.xpeditis.com
 
     networks:
       - xpeditis_internal
       - traefik_network
 
     labels:
       - "traefik.enable=true"
-      - "traefik.http.routers.xpeditis-api.rule=Host(`api.preprod.xpeditis.com`)"
+      # API Routes
+      - "traefik.http.routers.xpeditis-api.rule=Host(`api.xpeditis.com`)"
       - "traefik.http.routers.xpeditis-api.entrypoints=websecure"
       - "traefik.http.routers.xpeditis-api.tls=true"
       - "traefik.http.routers.xpeditis-api.tls.certresolver=letsencrypt"
+      - "traefik.http.routers.xpeditis-api.priority=100"
       - "traefik.http.services.xpeditis-api.loadbalancer.server.port=4000"
-      - "traefik.http.routers.xpeditis-api.middlewares=xpeditis-api-headers"
+      - "traefik.http.routers.xpeditis-api.middlewares=xpeditis-api-headers,xpeditis-api-ratelimit"
+      - "traefik.docker.network=traefik_network"
-      # HTTP → HTTPS
-      - "traefik.http.routers.xpeditis-api-http.rule=Host(`api.preprod.xpeditis.com`)"
+      # Middleware Headers
+      - "traefik.http.middlewares.xpeditis-api-headers.headers.customRequestHeaders.X-Forwarded-Proto=https"
+      - "traefik.http.middlewares.xpeditis-api-headers.headers.customRequestHeaders.X-Forwarded-For="
+      - "traefik.http.middlewares.xpeditis-api-headers.headers.customRequestHeaders.X-Real-IP="
+      - "traefik.http.middlewares.xpeditis-api-headers.headers.accessControlAllowOriginList=https://app.xpeditis.com,https://www.xpeditis.com"
+      - "traefik.http.middlewares.xpeditis-api-headers.headers.accessControlAllowMethods=GET,POST,PUT,PATCH,DELETE,OPTIONS"
+      - "traefik.http.middlewares.xpeditis-api-headers.headers.accessControlAllowHeaders=*"
+      - "traefik.http.middlewares.xpeditis-api-headers.headers.accessControlMaxAge=3600"
+      # Rate Limiting
+      - "traefik.http.middlewares.xpeditis-api-ratelimit.ratelimit.average=100"
+      - "traefik.http.middlewares.xpeditis-api-ratelimit.ratelimit.burst=200"
+      - "traefik.http.middlewares.xpeditis-api-ratelimit.ratelimit.period=1m"
+      # HTTP to HTTPS redirect
+      - "traefik.http.routers.xpeditis-api-http.rule=Host(`api.xpeditis.com`)"
       - "traefik.http.routers.xpeditis-api-http.entrypoints=web"
+      - "traefik.http.routers.xpeditis-api-http.priority=100"
       - "traefik.http.routers.xpeditis-api-http.middlewares=xpeditis-redirect"
       - "traefik.http.routers.xpeditis-api-http.service=xpeditis-api"
-      - "traefik.docker.network=traefik_network"
+    depends_on:
+      - xpeditis-db
+      - xpeditis-redis
+      - xpeditis-minio
+    healthcheck:
+      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:4000/health"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 60s
+    deploy:
+      replicas: 2
+      placement:
+        constraints:
+          - node.role == manager
+      restart_policy:
+        condition: on-failure
+        delay: 10s
+        max_attempts: 3
+      update_config:
+        parallelism: 1
+        delay: 10s
+        order: start-first
+      resources:
+        limits:
+          cpus: '1.0'
+          memory: 1G
+        reservations:
+          cpus: '0.5'
+          memory: 512M
 
   # Frontend (Next.js)
   xpeditis-frontend:
-    image: rg.fr-par.scw.cloud/weworkstudio/xpeditis-frontend:preprod
+    image: xpeditis/frontend:latest
     restart: unless-stopped
     environment:
-      NODE_ENV: preprod
-      NEXT_PUBLIC_API_URL: https://api.preprod.xpeditis.com
-      NEXT_PUBLIC_WS_URL: wss://api.preprod.xpeditis.com
+      NODE_ENV: production
+      NEXT_PUBLIC_API_URL: https://api.xpeditis.com
+      NEXT_PUBLIC_WS_URL: wss://api.xpeditis.com
     networks:
       - traefik_network
 
     labels:
       - "traefik.enable=true"
-      - "traefik.http.routers.xpeditis-app.rule=Host(`app.preprod.xpeditis.com`) || Host(`www.preprod.xpeditis.com`)"
+      # Frontend Routes
+      - "traefik.http.routers.xpeditis-app.rule=Host(`app.xpeditis.com`) || Host(`www.xpeditis.com`)"
       - "traefik.http.routers.xpeditis-app.entrypoints=websecure"
       - "traefik.http.routers.xpeditis-app.tls=true"
       - "traefik.http.routers.xpeditis-app.tls.certresolver=letsencrypt"
+      - "traefik.http.routers.xpeditis-app.priority=50"
       - "traefik.http.services.xpeditis-app.loadbalancer.server.port=3000"
-      # HTTP → HTTPS
-      - "traefik.http.routers.xpeditis-app-http.rule=Host(`app.preprod.xpeditis.com`) || Host(`www.preprod.xpeditis.com`)"
+      - "traefik.http.routers.xpeditis-app.middlewares=xpeditis-app-headers"
+      - "traefik.docker.network=traefik_network"
+      # Middleware Headers
+      - "traefik.http.middlewares.xpeditis-app-headers.headers.customRequestHeaders.X-Forwarded-Proto=https"
+      - "traefik.http.middlewares.xpeditis-app-headers.headers.customRequestHeaders.X-Forwarded-For="
+      - "traefik.http.middlewares.xpeditis-app-headers.headers.customRequestHeaders.X-Real-IP="
+      # Security Headers
+      - "traefik.http.middlewares.xpeditis-app-headers.headers.stsSeconds=31536000"
+      - "traefik.http.middlewares.xpeditis-app-headers.headers.stsIncludeSubdomains=true"
+      - "traefik.http.middlewares.xpeditis-app-headers.headers.stsPreload=true"
+      - "traefik.http.middlewares.xpeditis-app-headers.headers.forceSTSHeader=true"
+      - "traefik.http.middlewares.xpeditis-app-headers.headers.contentTypeNosniff=true"
+      - "traefik.http.middlewares.xpeditis-app-headers.headers.browserXssFilter=true"
+      # HTTP to HTTPS redirect
+      - "traefik.http.routers.xpeditis-app-http.rule=Host(`app.xpeditis.com`) || Host(`www.xpeditis.com`)"
       - "traefik.http.routers.xpeditis-app-http.entrypoints=web"
+      - "traefik.http.routers.xpeditis-app-http.priority=50"
       - "traefik.http.routers.xpeditis-app-http.middlewares=xpeditis-redirect"
       - "traefik.http.routers.xpeditis-app-http.service=xpeditis-app"
-      - "traefik.docker.network=traefik_network"
+    depends_on:
+      - xpeditis-backend
+    healthcheck:
+      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 40s
+    deploy:
+      replicas: 2
+      placement:
+        constraints:
+          - node.role == manager
+      restart_policy:
+        condition: on-failure
+        delay: 10s
+        max_attempts: 3
+      update_config:
+        parallelism: 1
+        delay: 10s
+        order: start-first
+      resources:
+        limits:
+          cpus: '0.5'
+          memory: 512M
+        reservations:
+          cpus: '0.25'
+          memory: 256M
 
 volumes:
   xpeditis_db_data:
+    driver: local
   xpeditis_redis_data:
+    driver: local
   xpeditis_minio_data:
+    driver: local
 
 networks:
   traefik_network:
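The new backend service definition adds a container healthcheck that probes `http://localhost:4000/health` with `wget --spider`. The endpoint itself is not shown anywhere in this comparison; below is a minimal NestJS sketch of what such a probe assumes (any 2xx response passes). The controller name is hypothetical.

```typescript
// Minimal sketch of a /health endpoint that would satisfy the wget --spider
// probe added in the stack above. Not taken from this codebase; names assumed.
import { Controller, Get } from '@nestjs/common';

@Controller('health')
export class HealthController {
  @Get()
  check(): { status: string; timestamp: string } {
    // A plain 200 response is enough for the container healthcheck.
    return { status: 'ok', timestamp: new Date().toISOString() };
  }
}
```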