xpeditis2.0/docs/deployment/hetzner/09-kubernetes-manifests.md
2026-03-26 18:08:28 +01:00

18 KiB

09 — Manifests Kubernetes complets

Tous les fichiers YAML de déploiement de Xpeditis. Créez un dossier k8s/ à la racine du projet.


Structure des fichiers

k8s/
├── 00-namespaces.yaml
├── 01-secrets.yaml          # ← À remplir avec vos valeurs (ne pas committer)
├── 02-configmaps.yaml
├── 03-backend-deployment.yaml
├── 04-backend-service.yaml
├── 05-frontend-deployment.yaml
├── 06-frontend-service.yaml
├── 07-ingress.yaml
├── 08-hpa.yaml
└── 09-pdb.yaml

00 — Namespaces

# k8s/00-namespaces.yaml
# Production namespace — every Xpeditis resource in this guide is created here.
---
apiVersion: v1
kind: Namespace
metadata:
  name: xpeditis-prod
  labels:
    environment: production
    app.kubernetes.io/managed-by: hetzner-k3s
# Create the namespace before applying any namespaced resource below
kubectl apply -f k8s/00-namespaces.yaml

01 — Secrets (⚠️ ne jamais committer ce fichier dans Git)

Ajoutez k8s/01-secrets.yaml à votre .gitignore.

# k8s/01-secrets.yaml  ← ADD TO .gitignore (contains live credentials — never commit)
# NOTE(review): several of these values (NODE_ENV, PORT, API_PREFIX, the public
# URLs) are not sensitive and could live in the backend-config ConfigMap instead;
# kept here so the backend gets all its env from a single envFrom source.
---
apiVersion: v1
kind: Secret
metadata:
  name: backend-secrets
  namespace: xpeditis-prod
type: Opaque
stringData:
  # Application
  NODE_ENV: "production"
  PORT: "4000"
  API_PREFIX: "api/v1"
  APP_URL: "https://app.xpeditis.com"
  FRONTEND_URL: "https://app.xpeditis.com"

  # Database (pick Option A or B — keep the other commented out)
  # === Option A: Neon.tech ===
  DATABASE_HOST: "ep-xxx.eu-central-1.aws.neon.tech"
  DATABASE_PORT: "5432"
  DATABASE_USER: "xpeditis"
  DATABASE_PASSWORD: "<NEON_PASSWORD>"
  DATABASE_NAME: "xpeditis"
  DATABASE_SSL: "true"
  DATABASE_SYNC: "false"
  DATABASE_LOGGING: "false"
  # === Option B: Self-hosted ===
  # DATABASE_HOST: "10.0.1.100"        # Hetzner private IP of the PG server
  # DATABASE_PORT: "6432"              # PgBouncer
  # DATABASE_USER: "xpeditis"
  # DATABASE_PASSWORD: "<PG_PASSWORD>"
  # DATABASE_NAME: "xpeditis_prod"
  # DATABASE_SYNC: "false"
  # DATABASE_LOGGING: "false"

  # Redis (pick Option A or B)
  # === Option A: Upstash ===
  REDIS_HOST: "your-redis.upstash.io"
  REDIS_PORT: "6379"
  REDIS_PASSWORD: "<UPSTASH_TOKEN>"
  REDIS_DB: "0"
  # === Option B: Self-hosted ===
  # REDIS_HOST: "redis.xpeditis-prod.svc.cluster.local"
  # REDIS_PORT: "6379"
  # REDIS_PASSWORD: "<REDIS_PASSWORD>"
  # REDIS_DB: "0"

  # JWT
  JWT_SECRET: "<RANDOM_64_CHAR_STRING>"
  JWT_ACCESS_EXPIRATION: "15m"
  JWT_REFRESH_EXPIRATION: "7d"

  # OAuth2 Google
  GOOGLE_CLIENT_ID: "<GOOGLE_CLIENT_ID>"
  GOOGLE_CLIENT_SECRET: "<GOOGLE_CLIENT_SECRET>"
  GOOGLE_CALLBACK_URL: "https://api.xpeditis.com/api/v1/auth/google/callback"

  # OAuth2 Microsoft
  MICROSOFT_CLIENT_ID: "<MICROSOFT_CLIENT_ID>"
  MICROSOFT_CLIENT_SECRET: "<MICROSOFT_CLIENT_SECRET>"
  MICROSOFT_CALLBACK_URL: "https://api.xpeditis.com/api/v1/auth/microsoft/callback"

  # Email (Brevo SMTP — replaces SendGrid)
  SMTP_HOST: "smtp-relay.brevo.com"
  SMTP_PORT: "587"
  SMTP_SECURE: "false"
  SMTP_USER: "<BREVO_LOGIN>"
  SMTP_PASS: "<BREVO_SMTP_KEY>"
  SMTP_FROM: "noreply@xpeditis.com"

  # Hetzner Object Storage (replaces MinIO)
  AWS_S3_ENDPOINT: "https://fsn1.your-objectstorage.com"
  AWS_ACCESS_KEY_ID: "<HETZNER_ACCESS_KEY>"
  AWS_SECRET_ACCESS_KEY: "<HETZNER_SECRET_KEY>"
  AWS_REGION: "eu-central-1"
  AWS_S3_BUCKET: "xpeditis-prod"

  # Carrier APIs
  MAERSK_API_KEY: "<MAERSK_API_KEY>"
  MAERSK_API_URL: "https://api.maersk.com/v1"
  MSC_API_KEY: "<MSC_API_KEY>"
  MSC_API_URL: "https://api.msc.com/v1"
  CMACGM_API_URL: "https://api.cma-cgm.com/v1"
  CMACGM_CLIENT_ID: "<CMACGM_CLIENT_ID>"
  CMACGM_CLIENT_SECRET: "<CMACGM_CLIENT_SECRET>"
  HAPAG_API_URL: "https://api.hapag-lloyd.com/v1"
  HAPAG_API_KEY: "<HAPAG_API_KEY>"
  ONE_API_URL: "https://api.one-line.com/v1"
  ONE_USERNAME: "<ONE_USERNAME>"
  ONE_PASSWORD: "<ONE_PASSWORD>"

  # Stripe
  STRIPE_SECRET_KEY: "sk_live_<...>"
  STRIPE_WEBHOOK_SECRET: "whsec_<...>"
  STRIPE_SILVER_MONTHLY_PRICE_ID: "price_<...>"
  STRIPE_SILVER_YEARLY_PRICE_ID: "price_<...>"
  STRIPE_GOLD_MONTHLY_PRICE_ID: "price_<...>"
  STRIPE_GOLD_YEARLY_PRICE_ID: "price_<...>"
  STRIPE_PLATINIUM_MONTHLY_PRICE_ID: "price_<...>"
  STRIPE_PLATINIUM_YEARLY_PRICE_ID: "price_<...>"

  # Security
  BCRYPT_ROUNDS: "12"
  SESSION_TIMEOUT_MS: "7200000"
  RATE_LIMIT_TTL: "60"
  RATE_LIMIT_MAX: "100"

  # Monitoring
  SENTRY_DSN: "<SENTRY_DSN>"
---
# Frontend secrets, injected via envFrom in the frontend Deployment.
# NOTE(review): NEXT_PUBLIC_* variables are normally inlined into the client
# bundle at BUILD time by Next.js — setting them here only affects server-side
# code unless the image is built with the same values. Verify against the
# Dockerfile/CI build args.
apiVersion: v1
kind: Secret
metadata:
  name: frontend-secrets
  namespace: xpeditis-prod
type: Opaque
stringData:
  NEXT_PUBLIC_API_URL: "https://api.xpeditis.com"
  NEXT_PUBLIC_APP_URL: "https://app.xpeditis.com"
  NEXT_PUBLIC_API_PREFIX: "api/v1"
  NEXTAUTH_URL: "https://app.xpeditis.com"
  NEXTAUTH_SECRET: "<RANDOM_32_CHAR_STRING>"
  GOOGLE_CLIENT_ID: "<GOOGLE_CLIENT_ID>"
  GOOGLE_CLIENT_SECRET: "<GOOGLE_CLIENT_SECRET>"
  MICROSOFT_CLIENT_ID: "<MICROSOFT_CLIENT_ID>"
  MICROSOFT_CLIENT_SECRET: "<MICROSOFT_CLIENT_SECRET>"
  NODE_ENV: "production"
# Generate the random secrets
# (48 raw bytes -> 64 base64 chars for JWT; 24 raw bytes -> 32 base64 chars for NextAuth)
echo "JWT_SECRET=$(openssl rand -base64 48)"
echo "NEXTAUTH_SECRET=$(openssl rand -base64 24)"

# Apply (after filling in the placeholder values)
kubectl apply -f k8s/01-secrets.yaml

# Verify the keys exist (without printing the values)
kubectl get secret backend-secrets -n xpeditis-prod -o jsonpath='{.data}' | jq 'keys'

02 — ConfigMaps (variables non-sensibles)

# k8s/02-configmaps.yaml
# Non-sensitive configuration, consumed via envFrom alongside the Secrets.
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: backend-config
  namespace: xpeditis-prod
data:
  # These values are not sensitive
  LOG_LEVEL: "info"
  TZ: "Europe/Paris"

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: frontend-config
  namespace: xpeditis-prod
data:
  TZ: "Europe/Paris"

03 — Deployment Backend NestJS

# k8s/03-backend-deployment.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: xpeditis-backend
  namespace: xpeditis-prod
  labels:
    app: xpeditis-backend
    version: "latest"
spec:
  replicas: 2
  selector:
    matchLabels:
      app: xpeditis-backend
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0        # Zero downtime deployment
  template:
    metadata:
      labels:
        app: xpeditis-backend
        version: "latest"
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "4000"
        # NOTE(review): this points at the health endpoint, not a /metrics
        # endpoint — confirm the backend actually exposes Prometheus-format
        # metrics at this path, otherwise scrapes will return health JSON.
        prometheus.io/path: "/api/v1/health"
    spec:
      # Anti-affinity: prefer scheduling replicas on different nodes
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - xpeditis-backend
              topologyKey: kubernetes.io/hostname

      # Grace period so in-flight WebSocket connections can drain on shutdown
      terminationGracePeriodSeconds: 60

      containers:
      - name: backend
        # Image is updated by CI/CD (doc 11).
        # NOTE(review): a mutable ":latest" tag with imagePullPolicy: Always
        # makes rollbacks and reproducibility hard — prefer immutable tags
        # (git SHA) pushed by CI.
        image: ghcr.io/<VOTRE_ORG>/xpeditis-backend:latest
        imagePullPolicy: Always
        ports:
        - containerPort: 4000
          name: http
          protocol: TCP

        # Environment variables injected from the Secret and ConfigMap
        envFrom:
        - secretRef:
            name: backend-secrets
        - configMapRef:
            name: backend-config

        # Resources (MVP sizing — adjust based on real metrics)
        resources:
          requests:
            cpu: "500m"
            memory: "512Mi"
          limits:
            cpu: "2000m"
            memory: "1.5Gi"

        # Health checks
        startupProbe:
          httpGet:
            path: /api/v1/health
            port: 4000
          initialDelaySeconds: 20
          periodSeconds: 5
          failureThreshold: 12    # 20s delay + 12 x 5s probes = up to ~80s before the pod is restarted

        readinessProbe:
          httpGet:
            path: /api/v1/health
            port: 4000
          initialDelaySeconds: 5
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 3

        livenessProbe:
          httpGet:
            path: /api/v1/health
            port: 4000
          initialDelaySeconds: 60
          periodSeconds: 30
          failureThreshold: 3

        # Lifecycle hook for graceful shutdown
        lifecycle:
          preStop:
            exec:
              command: ["/bin/sh", "-c", "sleep 10"]   # Gives the LB time to remove the pod from rotation

      # Pull credentials for GHCR (GitHub Container Registry)
      imagePullSecrets:
      - name: ghcr-credentials

      # Automatic restart (default for Deployments; stated here explicitly)
      restartPolicy: Always

04 — Service Backend

# k8s/04-backend-service.yaml
# ClusterIP Service fronting the backend pods; the Ingress routes api.xpeditis.com here.
---
apiVersion: v1
kind: Service
metadata:
  name: xpeditis-backend
  namespace: xpeditis-prod
  labels:
    app: xpeditis-backend
spec:
  selector:
    app: xpeditis-backend
  ports:
  - name: http
    port: 4000
    targetPort: 4000
    protocol: TCP
  type: ClusterIP

05 — Deployment Frontend Next.js

# k8s/05-frontend-deployment.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: xpeditis-frontend
  namespace: xpeditis-prod
  labels:
    app: xpeditis-frontend
spec:
  replicas: 1
  selector:
    matchLabels:
      app: xpeditis-frontend
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  template:
    metadata:
      labels:
        app: xpeditis-frontend
    spec:
      terminationGracePeriodSeconds: 30

      containers:
      - name: frontend
        # NOTE(review): mutable ":latest" tag — prefer immutable CI-pushed tags.
        image: ghcr.io/<VOTRE_ORG>/xpeditis-frontend:latest
        imagePullPolicy: Always
        ports:
        - containerPort: 3000
          name: http

        envFrom:
        - secretRef:
            name: frontend-secrets
        - configMapRef:
            name: frontend-config

        resources:
          requests:
            cpu: "250m"
            memory: "256Mi"
          limits:
            cpu: "1000m"
            memory: "768Mi"

        # Probes hit "/" — this renders the full home page on every probe;
        # a lightweight health route would be cheaper if one exists.
        startupProbe:
          httpGet:
            path: /
            port: 3000
          initialDelaySeconds: 10
          periodSeconds: 5
          failureThreshold: 12

        readinessProbe:
          httpGet:
            path: /
            port: 3000
          initialDelaySeconds: 5
          periodSeconds: 10
          failureThreshold: 3

        livenessProbe:
          httpGet:
            path: /
            port: 3000
          initialDelaySeconds: 30
          periodSeconds: 30
          failureThreshold: 3

        # Short pre-stop pause so the LB stops sending traffic before SIGTERM
        lifecycle:
          preStop:
            exec:
              command: ["/bin/sh", "-c", "sleep 5"]

      imagePullSecrets:
      - name: ghcr-credentials

      restartPolicy: Always

06 — Service Frontend

# k8s/06-frontend-service.yaml
# ClusterIP Service fronting the frontend pods; the Ingress routes app.xpeditis.com here.
---
apiVersion: v1
kind: Service
metadata:
  name: xpeditis-frontend
  namespace: xpeditis-prod
  labels:
    app: xpeditis-frontend
spec:
  selector:
    app: xpeditis-frontend
  ports:
  - name: http
    port: 3000
    targetPort: 3000
    protocol: TCP
  type: ClusterIP

07 — Ingress (Traefik + TLS)

# k8s/07-ingress.yaml
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: xpeditis-ingress
  namespace: xpeditis-prod
  annotations:
    # TLS via cert-manager
    cert-manager.io/cluster-issuer: "letsencrypt-prod"

    # Traefik config
    traefik.ingress.kubernetes.io/router.entrypoints: "websecure"
    traefik.ingress.kubernetes.io/router.tls: "true"

    # Sticky sessions for Socket.IO WebSocket connections.
    # NOTE(review): Traefik documents service.sticky.* as Service annotations,
    # not Ingress annotations — verify they take effect here, or move them
    # onto the xpeditis-backend Service.
    traefik.ingress.kubernetes.io/service.sticky.cookie: "true"
    traefik.ingress.kubernetes.io/service.sticky.cookie.name: "XPEDITIS_BACKEND"
    traefik.ingress.kubernetes.io/service.sticky.cookie.secure: "true"
    traefik.ingress.kubernetes.io/service.sticky.cookie.httponly: "true"

    # Middleware chain: rate limiting + security headers.
    # FIX: this annotation key was previously declared twice (once per
    # middleware). Duplicate YAML mapping keys are last-wins, so the ratelimit
    # middleware was silently dropped. Traefik expects a single annotation
    # with a comma-separated, ordered list of middlewares.
    traefik.ingress.kubernetes.io/router.middlewares: "xpeditis-prod-ratelimit@kubernetescrd,xpeditis-prod-headers@kubernetescrd"

spec:
  ingressClassName: traefik
  tls:
  - hosts:
    - api.xpeditis.com
    - app.xpeditis.com
    secretName: xpeditis-tls-prod

  rules:
  # API Backend NestJS
  - host: api.xpeditis.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: xpeditis-backend
            port:
              number: 4000

  # Frontend Next.js
  - host: app.xpeditis.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: xpeditis-frontend
            port:
              number: 3000
---
# Middleware: security response headers + HSTS + CSP
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: headers
  namespace: xpeditis-prod
spec:
  headers:
    customRequestHeaders:
      X-Forwarded-Proto: "https"
    customResponseHeaders:
      X-Frame-Options: "SAMEORIGIN"
      X-Content-Type-Options: "nosniff"
      X-XSS-Protection: "1; mode=block"
      Referrer-Policy: "strict-origin-when-cross-origin"
      Permissions-Policy: "geolocation=(), microphone=(), camera=()"
    # NOTE(review): 'unsafe-inline'/'unsafe-eval' largely neutralize CSP's XSS
    # protection — tighten once the frontend can run without them.
    contentSecurityPolicy: "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval'; style-src 'self' 'unsafe-inline';"
    stsSeconds: 31536000
    stsIncludeSubdomains: true
    stsPreload: true
---
# Middleware: Traefik-level rate limiting (in addition to NestJS rate limiting).
# average/burst are counted per source IP; depth: 1 uses the first IP in
# X-Forwarded-For (the client as seen by the LB).
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: ratelimit
  namespace: xpeditis-prod
spec:
  rateLimit:
    average: 100
    burst: 50
    period: 1m
    sourceCriterion:
      ipStrategy:
        depth: 1

08 — Horizontal Pod Autoscaler

# k8s/08-hpa.yaml
---
# HPA Backend — utilization percentages are computed against the pods'
# resource REQUESTS (500m CPU / 512Mi memory), not the limits.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: backend-hpa
  namespace: xpeditis-prod
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: xpeditis-backend
  minReplicas: 2
  maxReplicas: 15
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 80
  behavior:
    scaleUp:
      stabilizationWindowSeconds: 60
      policies:
      - type: Pods
        value: 2                # add at most 2 pods per minute
        periodSeconds: 60
    scaleDown:
      stabilizationWindowSeconds: 300     # wait 5 min before scaling down
      policies:
      - type: Pods
        value: 1                # remove at most 1 pod per 2 minutes
        periodSeconds: 120
---
# HPA Frontend — CPU-only scaling; scale-down damped by a 5-minute window.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: frontend-hpa
  namespace: xpeditis-prod
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: xpeditis-frontend
  minReplicas: 1
  maxReplicas: 8
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300

09 — PodDisruptionBudget

# k8s/09-pdb.yaml
---
# Guarantees at least 1 backend pod stays available during voluntary
# disruptions (node drains, cluster upgrades). Safe here because the backend
# runs >= 2 replicas (Deployment replicas: 2, HPA minReplicas: 2).
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: backend-pdb
  namespace: xpeditis-prod
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app: xpeditis-backend
---
# FIX: the frontend runs a single replica (Deployment replicas: 1, HPA
# minReplicas: 1), so "minAvailable: 1" made EVERY voluntary disruption
# forbidden — `kubectl drain` and node upgrades would block indefinitely on
# this pod. "maxUnavailable: 1" lets exactly one pod be evicted (brief
# downtime during maintenance) so drains can proceed; raise replicas to 2+
# if zero-downtime maintenance is required.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: frontend-pdb
  namespace: xpeditis-prod
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app: xpeditis-frontend

Secret GHCR (GitHub Container Registry)

Pour que Kubernetes puisse pull les images depuis GHCR :

# Create a GitHub Personal Access Token with scope: read:packages
# https://github.com/settings/tokens/new

# Docker-registry secret used by imagePullSecrets in both Deployments
kubectl create secret docker-registry ghcr-credentials \
  --namespace xpeditis-prod \
  --docker-server=ghcr.io \
  --docker-username=<VOTRE_USERNAME_GITHUB> \
  --docker-password=<VOTRE_GITHUB_PAT> \
  --docker-email=<VOTRE_EMAIL>

Déploiement complet

# Apply all manifests in order
kubectl apply -f k8s/00-namespaces.yaml
kubectl apply -f k8s/01-secrets.yaml      # After filling in the values
kubectl apply -f k8s/02-configmaps.yaml
kubectl apply -f k8s/03-backend-deployment.yaml
kubectl apply -f k8s/04-backend-service.yaml
kubectl apply -f k8s/05-frontend-deployment.yaml
kubectl apply -f k8s/06-frontend-service.yaml
kubectl apply -f k8s/07-ingress.yaml
kubectl apply -f k8s/08-hpa.yaml
kubectl apply -f k8s/09-pdb.yaml

# Or everything at once (filenames are numbered, so apply order matches)
kubectl apply -f k8s/

# Follow the rollout
kubectl rollout status deployment/xpeditis-backend -n xpeditis-prod
kubectl rollout status deployment/xpeditis-frontend -n xpeditis-prod

# Watch the pods
kubectl get pods -n xpeditis-prod -w

# Tail the logs
kubectl logs -f deployment/xpeditis-backend -n xpeditis-prod
kubectl logs -f deployment/xpeditis-frontend -n xpeditis-prod

# Check the TLS certificate issued by cert-manager
kubectl get certificate -n xpeditis-prod
# NAME               READY   SECRET             AGE
# xpeditis-tls-prod  True    xpeditis-tls-prod  2m

Migration des jobs TypeORM

Le déploiement inclut automatiquement les migrations via le startup.js dans le Dockerfile. Si vous avez besoin de lancer les migrations manuellement :

# One-shot migration Job.
# NOTE(review): no backoffLimit is set, so a failing migration retries with
# the Job default (6 attempts). Jobs are immutable — the delete at the end is
# required before the Job can be re-run with the same name.
cat > /tmp/migration-job.yaml << 'EOF'
apiVersion: batch/v1
kind: Job
metadata:
  name: xpeditis-migrations
  namespace: xpeditis-prod
spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
      - name: migrations
        image: ghcr.io/<VOTRE_ORG>/xpeditis-backend:latest
        command: ["node", "dist/migration-runner.js"]
        envFrom:
        - secretRef:
            name: backend-secrets
      imagePullSecrets:
      - name: ghcr-credentials
EOF

kubectl apply -f /tmp/migration-job.yaml
kubectl wait --for=condition=complete job/xpeditis-migrations -n xpeditis-prod --timeout=300s
kubectl logs job/xpeditis-migrations -n xpeditis-prod
kubectl delete job xpeditis-migrations -n xpeditis-prod