Trouble with updating running VueJs app in production without downtime

I have a problem updating a VueJS app that is running in production mode. Every time, instead of just running

docker-compose up --build

I have to run

docker-compose down
docker volume rm mkstat_frontend_dist
docker-compose up --build

This causes a lot of downtime, which I want to avoid. I understand that this happens because of the volume, but I don't know how to solve it. I've tried just removing the volume, but then nginx responds with

directory index of "/app/dist/" is forbidden

This is my app structure:

.
├── docker/
├── docker-compose.back.yml
├── docker-compose.dev.yml
├── docker-compose.yml
├── fresh_dump.sql
├── init-letsencrypt.sh
├── mkstat_backend/
├── mkstat_frontend/
├── redis.conf

This is my prod docker-compose file:

docker-compose.yml

version: "3.8"

services:
  backend:
    container_name: backend
    restart: always
    build:
      context: ./mkstat_backend
      dockerfile: Dockerfile.prod
    volumes:
      - static:/app/static
      - media:/app/media
    env_file:
      - ./mkstat_backend/.env.prod
    depends_on:
      - db

  db:
    container_name: db
    restart: always
    build:
      context: ./docker/postgres
      dockerfile: Dockerfile
    volumes:
      - pgdata:/var/lib/postgresql/data/
    ports:
      - "5432:5432"
    env_file:
      - ./mkstat_backend/.env.prod

  frontend:
    container_name: frontend
    build:
      context: ./mkstat_frontend
      dockerfile: Dockerfile.prod
    volumes:
      - frontend_dist:/app/dist
    depends_on:
      - backend

  nginx:
    image: nginx:alpine
    restart: unless-stopped
    command: '/bin/sh -c ''while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g "daemon off;"'''
    container_name: nginx
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./docker/nginx/nginx.conf:/etc/nginx/conf.d/default.conf
      - ./docker/certbot/conf:/etc/letsencrypt
      - ./docker/certbot/www:/var/www/certbot
      - static:/var/html/static
      - media:/var/html/media
      - frontend_dist:/app/dist
    depends_on:
      - backend
      - frontend

  certbot:
    container_name: certbot
    image: certbot/certbot
    restart: unless-stopped
    entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"

    volumes:
      - ./docker/certbot/conf:/etc/letsencrypt
      - ./docker/certbot/www:/var/www/certbot

  redis:
    image: redis:latest
    restart: always
    container_name: redis

    command: [
        "bash",
        "-c",
        "
        redis-server
        --requirepass $${REDIS_PASS}
        ",
      ]
    volumes:
      - redis:/var/lib/redis/data
      - ./redis.conf:/usr/local/etc/redis/redis.conf
    ports:
      - "6379"
    env_file:
      - ./mkstat_backend/.env.prod

  worker:
    build:
      context: ./mkstat_backend
    command: celery -A mkstat worker -B -l info -s /tmp/celerybeat-schedule
    restart: always
    container_name: celery
    depends_on:
      - db
      - redis
      - backend
    volumes:
      - ./mkstat_backend:/app
    env_file:
      - ./mkstat_backend/.env.prod

volumes:
  pgdata:
  static:
  media:
  frontend_dist:
  redis:

Vue Dockerfile:

FROM node:lts-alpine as build
WORKDIR /app
ENV PATH /app/node_modules/.bin:$PATH
COPY package.json /app/package.json
RUN npm install --silent
RUN npm install @vue/[email protected] -g
COPY . /app
RUN npm run build

Thanks in advance.

CodePudding user response:

TL;DR

docker-compose down
docker volume rm mkstat_frontend_dist
docker-compose up --build

... is not optimal, as the services will be down for the duration of the build. The services don't need to be taken down while the images build, so:

docker-compose build
docker-compose down
docker volume rm mkstat_frontend_dist
docker-compose up -d

... would be slightly more efficient: the services are only taken offline to remove the old containers and the volume, and the new containers are then created from the pre-built images.


As far as I can tell, the volume backing the dist files isn't needed, and you could remove it. The files served in the deployment would then be the ones built into the image, so you wouldn't have to drop the volume on every re-deployment.
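
For example, here is a minimal sketch of a multi-stage frontend Dockerfile. It assumes the frontend container serves the built files itself with its own nginx, so the frontend_dist volume and the /app/dist mounts could be dropped; adjust the paths to your setup:

# build stage
FROM node:lts-alpine as build
WORKDIR /app
COPY package.json /app/package.json
RUN npm install --silent
COPY . /app
RUN npm run build

# serve stage: the compiled assets are baked into the image
FROM nginx:alpine
COPY --from=build /app/dist /usr/share/nginx/html
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]

The main nginx container would then proxy to the frontend container (similar to the upstream block in the config below) instead of reading files from a shared volume, so rebuilding and recreating the frontend container is enough to ship new assets.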

If you're pushing your images to Docker Hub or another docker registry, then the image(s) have already been built and you don't need to re-build during the re-deployment. Your process might look like this (a command sketch follows the list):

  • build the docker image on your PC
  • push the docker image to the docker registry
  • pull the docker image on the server
  • docker-compose down the services to remove the old containers
  • docker-compose up -d to start new containers from the images
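
For example, with a hypothetical registry and image name (registry.example.com/mkstat_frontend is a placeholder; this also assumes each service in docker-compose.yml has an image: entry pointing at the registry so the server can pull instead of build):

# on your PC / CI
docker build -f ./mkstat_frontend/Dockerfile.prod -t registry.example.com/mkstat_frontend:latest ./mkstat_frontend
docker push registry.example.com/mkstat_frontend:latest

# on the server
docker-compose pull      # fetch the pre-built images
docker-compose down      # remove the old containers
docker-compose up -d     # start new containers from the pulled images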

For a production deployment of nginx, this is the example config that I use as a base:

worker_processes 1;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
  worker_connections 1024;
}

http {
  map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
  }

  include /etc/nginx/mime.types;
  default_type application/octet-stream;

  log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

  client_max_body_size 0;
  access_log /var/log/nginx/access.log main;
  sendfile on;
  keepalive_timeout 65;

  upstream web {
    server web:80 max_fails=3;
  }
  server {
    listen *:80;
    listen [::]:80;
    server_name _;
    return 301 https://$host$request_uri;
  }
  add_header X-Frame-Options SAMEORIGIN;
  add_header X-Content-Type-Options nosniff;
  add_header Strict-Transport-Security "max-age=31536000; includeSubdomains; preload";
  add_header Content-Security-Policy "default-src 'self';";
  server {
    listen *:443 ssl http2;
    listen [::]:443 ssl http2;
    server_name *.example.com example.com;
    charset utf-8;

    error_page 404 = @notfound;

    server_tokens off;

    proxy_buffer_size 128k;
    proxy_buffers 4 256k;
    proxy_busy_buffers_size 256k;

    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
    ssl_session_cache shared:SSL:50m;
    ssl_session_timeout 1d;
    ssl_session_tickets off;
    ssl_dhparam /etc/nginx/ssl/dhparam.pem;

    ssl_prefer_server_ciphers on;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers 'ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS';

    resolver 127.0.0.11 1.1.1.1 8.8.8.8 8.8.4.4 valid=86400s;
    resolver_timeout 5s;
    ssl_stapling on;
    ssl_stapling_verify on;
    ssl_trusted_certificate /etc/letsencrypt/live/example.com/chain.pem;

    location @notfound {
      return 301 /;
    }
    location /healthz {
      allow 127.0.0.1;
      deny all;
      stub_status;
    }
    location / {
      proxy_http_version 1.1;
      proxy_set_header HOST $host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header Upgrade $http_upgrade;
      proxy_set_header Connection $connection_upgrade;
      proxy_set_header X-Forwarded-Proto $scheme;
      proxy_set_header X-Forwarded-Server $host;
      proxy_set_header X-Forwarded-Port $server_port;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_pass http://web/;
    }
  }
}

The nginx.conf example is the result of a few third-party pentests: for example, leaving server_tokens at its default (on) will be flagged, allowing old TLS/SSL versions will be flagged, not setting the Content-Security-Policy header will be flagged, and so on.
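
Note that this example is a complete nginx.conf with its own events and http blocks, so in a setup like yours it would be mounted over the main config rather than into conf.d, e.g.:

  nginx:
    volumes:
      - ./docker/nginx/nginx.conf:/etc/nginx/nginx.conf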
