Moved the docker folder into myproject and protected secrets

- All Docker files now live in myproject/docker/
- Added docker/.env.docker to .gitignore to protect secrets
- Backward compatibility with the existing settings is preserved
- The project structure is now better organized
2026-01-04 00:31:02 +03:00
parent 40d1c5eff6
commit bcda94f09a
7 changed files with 20 additions and 15 deletions
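Since the commit relies on .gitignore to keep the secrets file out of version control, a quick way to confirm it is actually ignored is sketched below (the path follows the new myproject/docker/ layout and is an assumption):

# Prints the matching .gitignore rule; no output means the file would still be committed
git check-ignore -v myproject/docker/.env.docker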

View File

@@ -0,0 +1,32 @@
# Django settings
SECRET_KEY=change-this-to-a-secure-random-key-in-production-min-50-chars
DEBUG=False
ALLOWED_HOSTS=yourdomain.com,*.yourdomain.com,localhost,127.0.0.1
# Database (PostgreSQL)
DB_NAME=inventory_db
DB_USER=postgres
DB_PASSWORD=your-secure-postgres-password-here
DB_HOST=db
DB_PORT=5432
# Redis
REDIS_HOST=redis
REDIS_PORT=6379
REDIS_DB=0
# Celery
CELERY_BROKER_URL=redis://redis:6379/0
# Tenant Admin (created on first startup)
TENANT_ADMIN_EMAIL=admin@example.com
TENANT_ADMIN_PASSWORD=change-this-secure-password
TENANT_ADMIN_NAME=Admin
# Django-tenants
# Primary domain for the public schema
PUBLIC_SCHEMA_DOMAIN=yourdomain.com
# Domain settings for multi-tenant URLs
TENANT_DOMAIN_BASE=yourdomain.com
USE_HTTPS=True
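The SECRET_KEY placeholder asks for a random value of at least 50 characters; a sketch of one way to generate it before filling in .env.docker, using only the Python standard library:

# Prints a URL-safe random string well above the 50-character minimum
python -c "import secrets; print(secrets.token_urlsafe(64))"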

View File

@@ -0,0 +1,61 @@
# Dockerfile for the Django application with Celery
FROM python:3.11-slim
# Environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV DJANGO_SETTINGS_MODULE=myproject.settings
# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    # For PostgreSQL
    libpq-dev \
    postgresql-client \
    # For Pillow and pillow-heif
    libjpeg-dev \
    libpng-dev \
    libwebp-dev \
    libheif-dev \
    libde265-dev \
    # For building Python packages
    gcc \
    g++ \
    # Utilities
    curl \
    && rm -rf /var/lib/apt/lists/*
# Working directory
WORKDIR /app
# Copy requirements and install dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy the project
COPY . .
# Create directories for static and media files
RUN mkdir -p /app/staticfiles /app/media
# Create an unprivileged user
RUN useradd -m -u 1000 appuser
# Copy the entrypoint script and the public-tenant bootstrap script
COPY docker/entrypoint.sh /entrypoint.sh
COPY docker/create_public_tenant.py /app/docker/create_public_tenant.py
RUN chmod 755 /entrypoint.sh && chown appuser:appuser /entrypoint.sh
# Change ownership of the working directory
RUN chown -R appuser:appuser /app
USER appuser
# Application port
EXPOSE 8000
# Entrypoint (invoked explicitly through bash to avoid Permission denied errors)
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
# Default command (overridden in docker-compose)
CMD ["web"]
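The image is normally built by docker-compose (see the compose file below), but it can also be built directly; a sketch, assuming it is run from the myproject/ directory so the context matches the compose file's `context: ..`, and with an illustrative image tag:

docker build -f docker/Dockerfile -t mix_web .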

View File

@@ -0,0 +1,61 @@
import os
import django
from django.conf import settings
import sys
# Add /app to sys.path so we can import myproject
sys.path.append('/app')
# Setup Django environment
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')
django.setup()
from tenants.models import Client, Domain


def ensure_public_tenant():
    domain_name = os.environ.get('DOMAIN_NAME', 'localhost')
    print(f"Checking public tenant for domain: {domain_name}")
    email = os.environ.get('TENANT_ADMIN_EMAIL', 'admin@example.com')
    name = os.environ.get('TENANT_ADMIN_NAME', 'System Administrator')
    # 1. Ensure Client exists
    client, created = Client.objects.get_or_create(
        schema_name='public',
        defaults={
            'name': 'System Tenant',
            'owner_email': email,
            'owner_name': name
        }
    )
    if created:
        print("Created public tenant client.")
    else:
        print("Public tenant client already exists.")
    # 2. Ensure Domain exists
    # Check if this specific domain exists
    domain, created = Domain.objects.get_or_create(
        domain=domain_name,
        defaults={'tenant': client, 'is_primary': True}
    )
    if created:
        print(f"Created domain {domain_name} for public tenant.")
    else:
        print(f"Domain {domain_name} already exists.")
        if domain.tenant != client:
            print(f"WARNING: Domain {domain_name} is assigned to another tenant!")
    # 3. Init system data (System Customer, etc.)
    # SKIP for the public tenant: it doesn't have these tables (they live in the tenant schemas)
    # print("Initializing system data for public tenant...")
    # from django.core.management import call_command
    # try:
    #     call_command('init_tenant_data', schema='public')
    # except Exception as e:
    #     print(f"Error initializing system data: {e}")


if __name__ == '__main__':
    ensure_public_tenant()
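The entrypoint runs this script automatically on startup, but since it uses get_or_create it is idempotent and can also be re-run by hand inside a running container; a sketch, assuming the compose service names used below:

docker compose exec web python /app/docker/create_public_tenant.py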

View File

@@ -0,0 +1,125 @@
# All files are stored in /Volume1/DockerAppsData/mixapp/
# The YAML file is stored in /Volume1/DockerYAML/mix/
services:
  # PostgreSQL database
  db:
    image: postgres:15-alpine
    container_name: mix_postgres
    restart: unless-stopped
    environment:
      - POSTGRES_USER=${DB_USER:-postgres}
      - POSTGRES_DB=${DB_NAME:-inventory_db}
    env_file:
      - .env.docker
    volumes:
      - ../../postgres:/var/lib/postgresql/data
    healthcheck:
      test: [ "CMD-SHELL", "pg_isready -U ${DB_USER:-postgres} -d ${DB_NAME:-inventory_db}" ]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - mix_network
  # Redis for caching and as the Celery broker
  redis:
    image: redis:7-alpine
    container_name: mix_redis
    restart: unless-stopped
    command: redis-server --appendonly yes
    volumes:
      - ../../redis:/data
    healthcheck:
      test: [ "CMD", "redis-cli", "ping" ]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - mix_network
  # Django web application
  web:
    build:
      context: ..
      dockerfile: docker/Dockerfile
    container_name: mix_web
    restart: unless-stopped
    command: web
    env_file:
      - .env.docker
    environment:
      - ALLOWED_HOSTS=mix.smaa.by,.mix.smaa.by,localhost,127.0.0.1
      - CSRF_TRUSTED_ORIGINS=https://mix.smaa.by,https://*.mix.smaa.by
      - DOMAIN_NAME=mix.smaa.by
      - DB_HOST=db
      - REDIS_HOST=redis
    volumes:
      # Mount the application code for hot reloading (optional, if you need to update without rebuilding)
      # - /Volume1/DockerAppsData/mixapp/app:/app
      # Media and static files (mounted where Django expects them)
      - ../../media:/app/myproject/media
      - ../../static:/app/myproject/staticfiles
    ports:
      - "8000:8000"
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - mix_network
  # Celery worker for background task processing
  celery-worker:
    build:
      context: ..
      dockerfile: docker/Dockerfile
    container_name: mix_celery_worker
    restart: unless-stopped
    command: celery-worker
    env_file:
      - .env.docker
    environment:
      - DB_HOST=db
      - REDIS_HOST=redis
    volumes:
      - ..:/app
      - ../../media:/app/myproject/media
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - mix_network
  # Celery beat for periodic tasks
  celery-beat:
    build:
      context: ..
      dockerfile: docker/Dockerfile
    container_name: mix_celery_beat
    restart: unless-stopped
    command: celery-beat
    env_file:
      - .env.docker
    environment:
      - DB_HOST=db
      - REDIS_HOST=redis
    volumes:
      - ..:/app
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
    networks:
      - mix_network
networks:
  mix_network:
    driver: bridge
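A minimal sketch of bringing the stack up, assuming the commands are run from the directory holding this compose file and that .env.docker sits next to it (env_file paths are resolved relative to the compose file):

docker compose up -d --build
docker compose logs -f web   # follow migrations and the public-tenant bootstrap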

View File

@@ -0,0 +1,251 @@
#!/bin/bash
set -e
# Wait for PostgreSQL to become available
wait_for_postgres() {
    echo "Waiting for PostgreSQL..."
    python -c "
import psycopg2
import os
import sys
dbname = os.environ.get('DB_NAME', 'inventory_db')
user = os.environ.get('DB_USER', 'postgres')
password = os.environ.get('DB_PASSWORD', 'postgres')
host = os.environ.get('DB_HOST', 'db')
port = os.environ.get('DB_PORT', '5432')
print(f'Attempting connection to: host={host} port={port} dbname={dbname} user={user}')
try:
    conn = psycopg2.connect(
        dbname=dbname,
        user=user,
        password=password,
        host=host,
        port=port
    )
    conn.close()
    print('Connection successful!')
    exit(0)
except Exception as e:
    print(f'Error connecting to PostgreSQL: {e}', file=sys.stderr)
    exit(1)
"
    while [ $? -ne 0 ]; do
        echo "PostgreSQL is unavailable - sleeping"
        sleep 2
        python -c "
import psycopg2
import os
import sys
try:
    conn = psycopg2.connect(
        dbname=os.environ.get('DB_NAME', 'inventory_db'),
        user=os.environ.get('DB_USER', 'postgres'),
        password=os.environ.get('DB_PASSWORD', 'postgres'),
        host=os.environ.get('DB_HOST', 'db'),
        port=os.environ.get('DB_PORT', '5432')
    )
    conn.close()
    exit(0)
except Exception as e:
    print(f'Retry error: {e}', file=sys.stderr)
    exit(1)
"
    done
    echo "PostgreSQL is up!"
}
# Wait for Redis to become available
wait_for_redis() {
    echo "Waiting for Redis..."
    python -c "
import redis
import os
import sys
host = os.environ.get('REDIS_HOST', 'redis')
port = int(os.environ.get('REDIS_PORT', '6379'))
db = int(os.environ.get('REDIS_DB', '0'))
print(f'Attempting connection to Redis: host={host} port={port} db={db}')
try:
    r = redis.Redis(host=host, port=port, db=db)
    r.ping()
    print('Redis connection successful!')
    exit(0)
except Exception as e:
    print(f'Error connecting to Redis: {e}', file=sys.stderr)
    exit(1)
"
    while [ $? -ne 0 ]; do
        echo "Redis is unavailable - sleeping"
        sleep 2
        python -c "
import redis
import os
import sys
try:
    r = redis.Redis(
        host=os.environ.get('REDIS_HOST', 'redis'),
        port=int(os.environ.get('REDIS_PORT', '6379')),
        db=int(os.environ.get('REDIS_DB', '0'))
    )
    r.ping()
    exit(0)
except Exception as e:
    print(f'Redis retry error: {e}', file=sys.stderr)
    exit(1)
"
    done
    echo "Redis is up!"
}
# Create the media and staticfiles directories with the correct permissions
setup_directories() {
    echo "Setting up media and static directories..."
    # Resolve paths (in Docker BASE_DIR = /app, so MEDIA_ROOT = /app/myproject/media)
    MEDIA_ROOT="/app/myproject/media"
    STATIC_ROOT="/app/myproject/staticfiles"
    # Create the directories recursively if they do not exist
    # Important: also create the folder structure for tenants
    mkdir -p "$MEDIA_ROOT/tenants" "$STATIC_ROOT" 2>/dev/null || true
    # Try to set access permissions
    # Use 777 on the media directories so the container can write regardless of host-side permissions
    # This is acceptable here because access is controlled at the Docker volume level
    # Apply the permissions recursively to all existing files and directories
    find "$MEDIA_ROOT" -type d -exec chmod 777 {} \; 2>/dev/null || true
    find "$MEDIA_ROOT" -type f -exec chmod 666 {} \; 2>/dev/null || true
    chmod -R 755 "$STATIC_ROOT" 2>/dev/null || true
    echo "Media directory created/checked: $MEDIA_ROOT (permissions set)"
    echo "Static directory created/checked: $STATIC_ROOT"
}
# Apply migrations, collect static files and ensure the public tenant exists
run_migrations() {
    echo "Running migrations for shared apps..."
    python manage.py migrate_schemas --shared
    echo "Running migrations for tenant schemas..."
    python manage.py migrate_schemas --tenant
    echo "Collecting static files..."
    python manage.py collectstatic --noinput
    # Set permissions AFTER collectstatic
    echo "Setting permissions on static files..."
    STATIC_ROOT="/app/myproject/staticfiles"
    find "$STATIC_ROOT" -type d -exec chmod 755 {} \; 2>/dev/null || true
    find "$STATIC_ROOT" -type f -exec chmod 644 {} \; 2>/dev/null || true
    echo "Ensuring public tenant exists..."
    python /app/docker/create_public_tenant.py
}
# Create the superuser if it does not exist yet
create_superuser() {
    echo "Creating superuser if not exists..."
    python manage.py shell << EOF
from django.contrib.auth import get_user_model
from django.db import connection
from django_tenants.utils import schema_context
import os
User = get_user_model()
# Create the superuser in the public schema from environment variables
with schema_context('public'):
    email = os.environ.get('TENANT_ADMIN_EMAIL', 'admin@example.com')
    password = os.environ.get('TENANT_ADMIN_PASSWORD', 'changeme')
    first_name = os.environ.get('TENANT_ADMIN_NAME', 'Admin')
    if not User.objects.filter(email=email).exists():
        user = User.objects.create_superuser(
            email=email,
            password=password,
            name=first_name
        )
        print(f'Superuser {email} created successfully!')
    else:
        print(f'Superuser {email} already exists.')
EOF
}
# If manage.py is not in the current directory but exists in the myproject subfolder
if [ ! -f "manage.py" ] && [ -d "myproject" ]; then
    # Try to enter the directory, redirecting errors to /dev/null
    if cd myproject 2>/dev/null; then
        echo "Changing directory to myproject..."
        # Set PYTHONPATH so Python can find the myproject module
        export PYTHONPATH=$(pwd):$PYTHONPATH
        echo "PYTHONPATH set to: $PYTHONPATH"
    else
        # If the directory cannot be entered (permission problem), point PYTHONPATH at it from here
        echo "Warning: Cannot access myproject directory (permission denied). Setting PYTHONPATH to include myproject..."
        export PYTHONPATH=/app/myproject:$PYTHONPATH
        echo "PYTHONPATH set to: $PYTHONPATH"
    fi
fi
case "$1" in
web)
wait_for_postgres
wait_for_redis
setup_directories
run_migrations
create_superuser
echo "Starting Gunicorn..."
exec gunicorn myproject.wsgi:application \
--bind 0.0.0.0:8000 \
--workers 3 \
--threads 2 \
--timeout 120 \
--access-logfile - \
--error-logfile - \
--capture-output
;;
celery-worker)
wait_for_postgres
wait_for_redis
setup_directories
echo "Starting Celery Worker..."
exec celery -A myproject worker \
-l info \
-Q celery,photo_processing \
--concurrency=2
;;
celery-beat)
wait_for_postgres
wait_for_redis
echo "Starting Celery Beat..."
exec celery -A myproject beat -l info
;;
migrate)
wait_for_postgres
run_migrations
create_superuser
;;
collectstatic)
wait_for_postgres
setup_directories
echo "Collecting static files..."
python manage.py collectstatic --noinput
echo "Setting permissions on static files..."
STATIC_ROOT="/app/myproject/staticfiles"
find "$STATIC_ROOT" -type d -exec chmod 755 {} \; 2>/dev/null || true
find "$STATIC_ROOT" -type f -exec chmod 644 {} \; 2>/dev/null || true
echo "Static files collected and permissions set."
;;
shell)
exec python manage.py shell
;;
*)
exec "$@"
;;
esac
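Because the entrypoint dispatches on its first argument, one-off maintenance tasks can be run through the same image; a sketch, assuming the compose services defined above:

docker compose run --rm web migrate        # migrations, static files and superuser only
docker compose run --rm web collectstatic  # rebuild static files
docker compose run --rm web shell          # Django shell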

View File

@@ -0,0 +1,135 @@
#!/bin/bash
set -e
# Wait for PostgreSQL to become available
wait_for_postgres() {
    echo "Waiting for PostgreSQL..."
    while ! python -c "
import psycopg2
import os
try:
    conn = psycopg2.connect(
        dbname=os.environ.get('DB_NAME', 'inventory_db'),
        user=os.environ.get('DB_USER', 'postgres'),
        password=os.environ.get('DB_PASSWORD', 'postgres'),
        host=os.environ.get('DB_HOST', 'db'),
        port=os.environ.get('DB_PORT', '5432')
    )
    conn.close()
    exit(0)
except:
    exit(1)
" 2>/dev/null; do
        echo "PostgreSQL is unavailable - sleeping"
        sleep 2
    done
    echo "PostgreSQL is up!"
}
# Wait for Redis to become available
wait_for_redis() {
    echo "Waiting for Redis..."
    while ! python -c "
import redis
import os
try:
    r = redis.Redis(
        host=os.environ.get('REDIS_HOST', 'redis'),
        port=int(os.environ.get('REDIS_PORT', '6379')),
        db=int(os.environ.get('REDIS_DB', '0'))
    )
    r.ping()
    exit(0)
except:
    exit(1)
" 2>/dev/null; do
        echo "Redis is unavailable - sleeping"
        sleep 2
    done
    echo "Redis is up!"
}
# Apply migrations and collect static files
run_migrations() {
    echo "Running migrations for shared apps..."
    python manage.py migrate_schemas --shared
    echo "Running migrations for tenant schemas..."
    python manage.py migrate_schemas --tenant
    echo "Collecting static files..."
    python manage.py collectstatic --noinput
}
# Create the superuser if it does not exist yet
create_superuser() {
    echo "Creating superuser if not exists..."
    python manage.py shell << EOF
from django.contrib.auth import get_user_model
from django.db import connection
from django_tenants.utils import schema_context
import os
User = get_user_model()
# Create the superuser in the public schema from environment variables
with schema_context('public'):
    email = os.environ.get('TENANT_ADMIN_EMAIL', 'admin@example.com')
    password = os.environ.get('TENANT_ADMIN_PASSWORD', 'changeme')
    first_name = os.environ.get('TENANT_ADMIN_NAME', 'Admin')
    if not User.objects.filter(email=email).exists():
        user = User.objects.create_superuser(
            email=email,
            password=password,
            first_name=first_name
        )
        print(f'Superuser {email} created successfully!')
    else:
        print(f'Superuser {email} already exists.')
EOF
}
case "$1" in
web)
wait_for_postgres
wait_for_redis
run_migrations
create_superuser
echo "Starting Gunicorn..."
exec gunicorn myproject.wsgi:application \
--bind 0.0.0.0:8000 \
--workers 3 \
--threads 2 \
--timeout 120 \
--access-logfile - \
--error-logfile - \
--capture-output
;;
celery-worker)
wait_for_postgres
wait_for_redis
echo "Starting Celery Worker..."
exec celery -A myproject worker \
-l info \
-Q celery,photo_processing \
--concurrency=2
;;
celery-beat)
wait_for_postgres
wait_for_redis
echo "Starting Celery Beat..."
exec celery -A myproject beat -l info
;;
migrate)
wait_for_postgres
run_migrations
create_superuser
;;
shell)
exec python manage.py shell
;;
*)
exec "$@"
;;
esac