fix: All in one docker image setup and build workflow (#4960)

* base images and action

* fix base docker files

* aio base image name change

* aio action changes

* aio base action cleanup

* aio file name changes

* base dockerfile updated for warning

* fixes

* action fix

* wip

* wip

* added app build

* checking aio app

* fixes

* fixes

* fixes to app images

* fix nginx

* fix action

* fix base

* modified dockerfile

* dockerfile fix

* fix

* pg-setup fix

* fix dockerfile-app

* fix

* fix: dockerfile-app

* added cache

* fix dockerfile

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* modified base action caching

* modified aio-branch action

* wip

* feature action modified

* action modified

* test deployment

* fix action

* flatten branch name, uninstall helm before installing

* wip

* testing build

* test build

* action modified

* updated action

* feature helm deployment fix

* removed feature build cache

* enabled cache on aio app build

* removed aio.sh
Manish Gupta authored 2024-06-28 13:08:13 +05:30; committed by GitHub
parent 761fbe3ffb
commit b5a2e5c727
GPG Key ID: B5690EEEBB952194
15 changed files with 1418 additions and 474 deletions


@@ -16,36 +16,44 @@ jobs:
gh_buildx_version: ${{ steps.set_env_variables.outputs.BUILDX_VERSION }}
gh_buildx_platforms: ${{ steps.set_env_variables.outputs.BUILDX_PLATFORMS }}
gh_buildx_endpoint: ${{ steps.set_env_variables.outputs.BUILDX_ENDPOINT }}
build_base: ${{ steps.changed_files.outputs.base_any_changed }}
image_tag: ${{ steps.set_env_variables.outputs.IMAGE_TAG }}
steps:
- id: set_env_variables
name: Set Environment Variables
run: |
echo "BUILDX_DRIVER=cloud" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=lab:latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64,linux/arm64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=makeplane/plane-dev" >> $GITHUB_OUTPUT
echo "TARGET_BRANCH=${{ env.TARGET_BRANCH }}" >> $GITHUB_OUTPUT
if [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
echo "BUILDX_DRIVER=cloud" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=lab:latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64,linux/arm64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=makeplane/plane-dev" >> $GITHUB_OUTPUT
echo "TARGET_BRANCH=${{ env.TARGET_BRANCH }}" >> $GITHUB_OUTPUT
echo "IMAGE_TAG=latest" >> $GITHUB_OUTPUT
else
echo "BUILDX_DRIVER=docker-container" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=" >> $GITHUB_OUTPUT
if [ "${{ env.TARGET_BRANCH }}" == "preview" ]; then
echo "TARGET_BRANCH=preview" >> $GITHUB_OUTPUT
echo "IMAGE_TAG=preview" >> $GITHUB_OUTPUT
else
echo "TARGET_BRANCH=develop" >> $GITHUB_OUTPUT
echo "IMAGE_TAG=develop" >> $GITHUB_OUTPUT
fi
fi
- id: checkout_files
name: Checkout Files
uses: actions/checkout@v4
- name: Get changed files
id: changed_files
uses: tj-actions/changed-files@v42
with:
files_yaml: |
base:
- aio/Dockerfile.base
base_build_push:
if: ${{ needs.base_build_setup.outputs.build_base == 'true' || github.event_name == 'workflow_dispatch' || needs.base_build_setup.outputs.gh_branch_name == 'master' }}
full_base_build_push:
runs-on: ubuntu-latest
needs: [base_build_setup]
env:
BASE_IMG_TAG: makeplane/plane-aio-base:${{ needs.base_build_setup.outputs.gh_branch_name }}
BASE_IMG_TAG: makeplane/plane-aio-base:full-${{ needs.base_build_setup.outputs.image_tag }}
TARGET_BRANCH: ${{ needs.base_build_setup.outputs.gh_branch_name }}
BUILDX_DRIVER: ${{ needs.base_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.base_build_setup.outputs.gh_buildx_version }}
@@ -55,15 +63,6 @@ jobs:
- name: Check out the repo
uses: actions/checkout@v4
- name: Set Docker Tag
run: |
if [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
TAG=makeplane/plane-aio-base:latest
else
TAG=${{ env.BASE_IMG_TAG }}
fi
echo "BASE_IMG_TAG=${TAG}" >> $GITHUB_ENV
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
@@ -81,10 +80,54 @@
uses: docker/build-push-action@v5.1.0
with:
context: ./aio
file: ./aio/Dockerfile.base
file: ./aio/Dockerfile-base-full
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.BASE_IMG_TAG }}
push: true
cache-from: type=gha
cache-to: type=gha,mode=max
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
slim_base_build_push:
runs-on: ubuntu-latest
needs: [base_build_setup]
env:
BASE_IMG_TAG: makeplane/plane-aio-base:slim-${{ needs.base_build_setup.outputs.image_tag }}
TARGET_BRANCH: ${{ needs.base_build_setup.outputs.gh_branch_name }}
BUILDX_DRIVER: ${{ needs.base_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.base_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.base_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.base_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Check out the repo
uses: actions/checkout@v4
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Build and Push to Docker Hub
uses: docker/build-push-action@v5.1.0
with:
context: ./aio
file: ./aio/Dockerfile-base-slim
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.BASE_IMG_TAG }}
push: true
cache-from: type=gha
cache-to: type=gha,mode=max
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
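
Once both jobs have pushed, the resulting base images can be pulled and inspected locally; a minimal sketch, assuming a master build so the latest tags exist:

docker pull makeplane/plane-aio-base:full-latest
docker pull makeplane/plane-aio-base:slim-latest
docker image inspect makeplane/plane-aio-base:slim-latest --format '{{.Os}}/{{.Architecture}}'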

.github/workflows/build-aio-branch.yml (new file, 197 lines)

@@ -0,0 +1,197 @@
name: Branch Build AIO
on:
workflow_dispatch:
inputs:
full:
description: 'Run full build'
type: boolean
required: false
default: false
slim:
description: 'Run slim build'
type: boolean
required: false
default: false
release:
types: [released, prereleased]
env:
TARGET_BRANCH: ${{ github.ref_name || github.event.release.target_commitish }}
FULL_BUILD_INPUT: ${{ github.event.inputs.full }}
SLIM_BUILD_INPUT: ${{ github.event.inputs.slim }}
jobs:
branch_build_setup:
name: Build Setup
runs-on: ubuntu-latest
outputs:
gh_branch_name: ${{ steps.set_env_variables.outputs.TARGET_BRANCH }}
gh_buildx_driver: ${{ steps.set_env_variables.outputs.BUILDX_DRIVER }}
gh_buildx_version: ${{ steps.set_env_variables.outputs.BUILDX_VERSION }}
gh_buildx_platforms: ${{ steps.set_env_variables.outputs.BUILDX_PLATFORMS }}
gh_buildx_endpoint: ${{ steps.set_env_variables.outputs.BUILDX_ENDPOINT }}
aio_base_tag: ${{ steps.set_env_variables.outputs.AIO_BASE_TAG }}
do_full_build: ${{ steps.set_env_variables.outputs.DO_FULL_BUILD }}
do_slim_build: ${{ steps.set_env_variables.outputs.DO_SLIM_BUILD }}
steps:
- id: set_env_variables
name: Set Environment Variables
run: |
if [ "${{ env.TARGET_BRANCH }}" == "master" ] || [ "${{ github.event_name }}" == "release" ]; then
echo "BUILDX_DRIVER=cloud" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=lab:latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64,linux/arm64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=makeplane/plane-dev" >> $GITHUB_OUTPUT
echo "AIO_BASE_TAG=latest" >> $GITHUB_OUTPUT
else
echo "BUILDX_DRIVER=docker-container" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=" >> $GITHUB_OUTPUT
if [ "${{ env.TARGET_BRANCH }}" == "preview" ]; then
echo "AIO_BASE_TAG=preview" >> $GITHUB_OUTPUT
else
echo "AIO_BASE_TAG=develop" >> $GITHUB_OUTPUT
fi
fi
echo "TARGET_BRANCH=${{ env.TARGET_BRANCH }}" >> $GITHUB_OUTPUT
if [ "${{ env.FULL_BUILD_INPUT }}" == "true" ] || [ "${{github.event_name}}" == "push" ] || [ "${{github.event_name}}" == "release" ]; then
echo "DO_FULL_BUILD=true" >> $GITHUB_OUTPUT
else
echo "DO_FULL_BUILD=false" >> $GITHUB_OUTPUT
fi
if [ "${{ env.SLIM_BUILD_INPUT }}" == "true" ] || [ "${{github.event_name}}" == "push" ] || [ "${{github.event_name}}" == "release" ]; then
echo "DO_SLIM_BUILD=true" >> $GITHUB_OUTPUT
else
echo "DO_SLIM_BUILD=false" >> $GITHUB_OUTPUT
fi
- id: checkout_files
name: Checkout Files
uses: actions/checkout@v4
full_build_push:
if: ${{ needs.branch_build_setup.outputs.do_full_build == 'true' }}
runs-on: ubuntu-20.04
needs: [branch_build_setup]
env:
BUILD_TYPE: full
AIO_BASE_TAG: ${{ needs.branch_build_setup.outputs.aio_base_tag }}
AIO_IMAGE_TAGS: makeplane/plane-aio:full-${{ needs.branch_build_setup.outputs.gh_branch_name }}
TARGET_BRANCH: ${{ needs.branch_build_setup.outputs.gh_branch_name }}
BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Set Docker Tag
run: |
if [ "${{ github.event_name }}" == "release" ]; then
TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-stable,makeplane/plane-aio:${{env.BUILD_TYPE}}-${{ github.event.release.tag_name }}
elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-latest
else
TAG=${{ env.AIO_IMAGE_TAGS }}
fi
echo "AIO_IMAGE_TAGS=${TAG}" >> $GITHUB_ENV
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Build and Push to Docker Hub
uses: docker/build-push-action@v5.1.0
with:
context: .
file: ./aio/Dockerfile-app
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.AIO_IMAGE_TAGS }}
push: true
build-args: |
BUILD_TAG=${{ env.AIO_BASE_TAG }}
BUILD_TYPE=${{env.BUILD_TYPE}}
cache-from: type=gha
cache-to: type=gha,mode=max
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
slim_build_push:
if: ${{ needs.branch_build_setup.outputs.do_slim_build == 'true' }}
runs-on: ubuntu-20.04
needs: [branch_build_setup]
env:
BUILD_TYPE: slim
AIO_BASE_TAG: ${{ needs.branch_build_setup.outputs.aio_base_tag }}
AIO_IMAGE_TAGS: makeplane/plane-aio:slim-${{ needs.branch_build_setup.outputs.gh_branch_name }}
TARGET_BRANCH: ${{ needs.branch_build_setup.outputs.gh_branch_name }}
BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Set Docker Tag
run: |
if [ "${{ github.event_name }}" == "release" ]; then
TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-stable,makeplane/plane-aio:${{env.BUILD_TYPE}}-${{ github.event.release.tag_name }}
elif [ "${{ env.TARGET_BRANCH }}" == "master" ]; then
TAG=makeplane/plane-aio:${{env.BUILD_TYPE}}-latest
else
TAG=${{ env.AIO_IMAGE_TAGS }}
fi
echo "AIO_IMAGE_TAGS=${TAG}" >> $GITHUB_ENV
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Build and Push to Docker Hub
uses: docker/build-push-action@v5.1.0
with:
context: .
file: ./aio/Dockerfile-app
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.AIO_IMAGE_TAGS }}
push: true
build-args: |
BUILD_TAG=${{ env.AIO_BASE_TAG }}
BUILD_TYPE=${{env.BUILD_TYPE}}
cache-from: type=gha
cache-to: type=gha,mode=max
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
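
Because the full and slim jobs are gated on the workflow_dispatch inputs, a branch build can also be triggered from the command line; a sketch using the GitHub CLI, assuming the workflow file is named build-aio-branch.yml as above:

gh workflow run build-aio-branch.yml --ref develop -f full=true -f slim=false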


@@ -2,190 +2,99 @@ name: Feature Preview
on:
workflow_dispatch:
inputs:
web-build:
required: false
description: "Build Web"
type: boolean
default: true
space-build:
required: false
description: "Build Space"
type: boolean
default: false
admin-build:
required: false
description: "Build Admin"
type: boolean
default: false
env:
BUILD_WEB: ${{ github.event.inputs.web-build }}
BUILD_SPACE: ${{ github.event.inputs.space-build }}
BUILD_ADMIN: ${{ github.event.inputs.admin-build }}
TARGET_BRANCH: ${{ github.ref_name }}
jobs:
setup-feature-build:
name: Feature Build Setup
branch_build_setup:
name: Build Setup
runs-on: ubuntu-latest
steps:
- name: Checkout
run: |
echo "BUILD_WEB=$BUILD_WEB"
echo "BUILD_SPACE=$BUILD_SPACE"
echo "BUILD_ADMIN=$BUILD_ADMIN"
outputs:
web-build: ${{ env.BUILD_WEB}}
space-build: ${{env.BUILD_SPACE}}
admin-build: ${{env.BUILD_ADMIN}}
gh_branch_name: ${{ steps.set_env_variables.outputs.TARGET_BRANCH }}
flat_branch_name: ${{ steps.set_env_variables.outputs.FLAT_BRANCH_NAME }}
gh_buildx_driver: ${{ steps.set_env_variables.outputs.BUILDX_DRIVER }}
gh_buildx_version: ${{ steps.set_env_variables.outputs.BUILDX_VERSION }}
gh_buildx_platforms: ${{ steps.set_env_variables.outputs.BUILDX_PLATFORMS }}
gh_buildx_endpoint: ${{ steps.set_env_variables.outputs.BUILDX_ENDPOINT }}
aio_base_tag: ${{ steps.set_env_variables.outputs.AIO_BASE_TAG }}
do_full_build: ${{ steps.set_env_variables.outputs.DO_FULL_BUILD }}
do_slim_build: ${{ steps.set_env_variables.outputs.DO_SLIM_BUILD }}
feature-build-web:
if: ${{ needs.setup-feature-build.outputs.web-build == 'true' }}
needs: setup-feature-build
name: Feature Build Web
runs-on: ubuntu-latest
env:
AWS_ACCESS_KEY_ID: ${{ vars.FEATURE_PREVIEW_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.FEATURE_PREVIEW_AWS_SECRET_ACCESS_KEY }}
AWS_BUCKET: ${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}
NEXT_PUBLIC_API_BASE_URL: ${{ vars.FEATURE_PREVIEW_NEXT_PUBLIC_API_BASE_URL }}
steps:
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "18"
- name: Install AWS cli
- id: set_env_variables
name: Set Environment Variables
run: |
sudo apt-get update
sudo apt-get install -y python3-pip
pip3 install awscli
- name: Checkout
echo "BUILDX_DRIVER=docker-container" >> $GITHUB_OUTPUT
echo "BUILDX_VERSION=latest" >> $GITHUB_OUTPUT
echo "BUILDX_PLATFORMS=linux/amd64" >> $GITHUB_OUTPUT
echo "BUILDX_ENDPOINT=" >> $GITHUB_OUTPUT
echo "AIO_BASE_TAG=develop" >> $GITHUB_OUTPUT
echo "TARGET_BRANCH=${{ env.TARGET_BRANCH }}" >> $GITHUB_OUTPUT
FLAT_BRANCH_NAME=$(echo "${{ env.TARGET_BRANCH }}" | sed 's/[^a-zA-Z0-9]/-/g')
echo "FLAT_BRANCH_NAME=$FLAT_BRANCH_NAME" >> $GITHUB_OUTPUT
- id: checkout_files
name: Checkout Files
uses: actions/checkout@v4
with:
path: plane
- name: Install Dependencies
run: |
cd $GITHUB_WORKSPACE/plane
yarn install
- name: Build Web
id: build-web
run: |
cd $GITHUB_WORKSPACE/plane
yarn build --filter=web
cd $GITHUB_WORKSPACE
TAR_NAME="web.tar.gz"
tar -czf $TAR_NAME ./plane
FILE_EXPIRY=$(date -u -d "+2 days" +"%Y-%m-%dT%H:%M:%SZ")
aws s3 cp $TAR_NAME s3://${{ env.AWS_BUCKET }}/${{github.sha}}/$TAR_NAME --expires $FILE_EXPIRY
feature-build-space:
if: ${{ needs.setup-feature-build.outputs.space-build == 'true' }}
needs: setup-feature-build
name: Feature Build Space
runs-on: ubuntu-latest
full_build_push:
runs-on: ubuntu-20.04
needs: [branch_build_setup]
env:
AWS_ACCESS_KEY_ID: ${{ vars.FEATURE_PREVIEW_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.FEATURE_PREVIEW_AWS_SECRET_ACCESS_KEY }}
AWS_BUCKET: ${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}
NEXT_PUBLIC_SPACE_BASE_PATH: "/spaces"
NEXT_PUBLIC_API_BASE_URL: ${{ vars.FEATURE_PREVIEW_NEXT_PUBLIC_API_BASE_URL }}
BUILD_TYPE: full
AIO_BASE_TAG: ${{ needs.branch_build_setup.outputs.aio_base_tag }}
AIO_IMAGE_TAGS: makeplane/plane-aio-feature:${{ needs.branch_build_setup.outputs.flat_branch_name }}
BUILDX_DRIVER: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
BUILDX_VERSION: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
BUILDX_PLATFORMS: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
BUILDX_ENDPOINT: ${{ needs.branch_build_setup.outputs.gh_buildx_endpoint }}
steps:
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: ${{ env.BUILDX_DRIVER }}
version: ${{ env.BUILDX_VERSION }}
endpoint: ${{ env.BUILDX_ENDPOINT }}
- name: Check out the repo
uses: actions/checkout@v4
- name: Build and Push to Docker Hub
uses: docker/build-push-action@v5.1.0
with:
context: .
file: ./aio/Dockerfile-app
platforms: ${{ env.BUILDX_PLATFORMS }}
tags: ${{ env.AIO_IMAGE_TAGS }}
push: true
build-args:
BUILD_TAG=${{ env.AIO_BASE_TAG }}
BUILD_TYPE=${{env.BUILD_TYPE}}
# cache-from: type=gha
# cache-to: type=gha,mode=max
env:
DOCKER_BUILDKIT: 1
DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
outputs:
do-build: ${{ needs.setup-feature-build.outputs.space-build }}
s3-url: ${{ steps.build-space.outputs.S3_PRESIGNED_URL }}
steps:
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "18"
- name: Install AWS cli
run: |
sudo apt-get update
sudo apt-get install -y python3-pip
pip3 install awscli
- name: Checkout
uses: actions/checkout@v4
with:
path: plane
- name: Install Dependencies
run: |
cd $GITHUB_WORKSPACE/plane
yarn install
- name: Build Space
id: build-space
run: |
cd $GITHUB_WORKSPACE/plane
yarn build --filter=space
cd $GITHUB_WORKSPACE
TAR_NAME="space.tar.gz"
tar -czf $TAR_NAME ./plane
FILE_EXPIRY=$(date -u -d "+2 days" +"%Y-%m-%dT%H:%M:%SZ")
aws s3 cp $TAR_NAME s3://${{ env.AWS_BUCKET }}/${{github.sha}}/$TAR_NAME --expires $FILE_EXPIRY
feature-build-admin:
if: ${{ needs.setup-feature-build.outputs.admin-build == 'true' }}
needs: setup-feature-build
name: Feature Build Admin
runs-on: ubuntu-latest
env:
AWS_ACCESS_KEY_ID: ${{ vars.FEATURE_PREVIEW_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.FEATURE_PREVIEW_AWS_SECRET_ACCESS_KEY }}
AWS_BUCKET: ${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}
NEXT_PUBLIC_ADMIN_BASE_PATH: "/god-mode"
NEXT_PUBLIC_API_BASE_URL: ${{ vars.FEATURE_PREVIEW_NEXT_PUBLIC_API_BASE_URL }}
outputs:
do-build: ${{ needs.setup-feature-build.outputs.admin-build }}
s3-url: ${{ steps.build-admin.outputs.S3_PRESIGNED_URL }}
steps:
- name: Set up Node.js
uses: actions/setup-node@v4
with:
node-version: "18"
- name: Install AWS cli
run: |
sudo apt-get update
sudo apt-get install -y python3-pip
pip3 install awscli
- name: Checkout
uses: actions/checkout@v4
with:
path: plane
- name: Install Dependencies
run: |
cd $GITHUB_WORKSPACE/plane
yarn install
- name: Build Admin
id: build-admin
run: |
cd $GITHUB_WORKSPACE/plane
yarn build --filter=admin
cd $GITHUB_WORKSPACE
TAR_NAME="admin.tar.gz"
tar -czf $TAR_NAME ./plane
FILE_EXPIRY=$(date -u -d "+2 days" +"%Y-%m-%dT%H:%M:%SZ")
aws s3 cp $TAR_NAME s3://${{ env.AWS_BUCKET }}/${{github.sha}}/$TAR_NAME --expires $FILE_EXPIRY
AIO_IMAGE_TAGS: ${{ env.AIO_IMAGE_TAGS }}
feature-deploy:
if: ${{ always() && (needs.setup-feature-build.outputs.web-build == 'true' || needs.setup-feature-build.outputs.space-build == 'true' || needs.setup-feature-build.outputs.admin-build == 'true') }}
needs:
[
setup-feature-build,
feature-build-web,
feature-build-space,
feature-build-admin,
]
needs: [branch_build_setup, full_build_push]
name: Feature Deploy
runs-on: ubuntu-latest
env:
AWS_ACCESS_KEY_ID: ${{ vars.FEATURE_PREVIEW_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.FEATURE_PREVIEW_AWS_SECRET_ACCESS_KEY }}
AWS_BUCKET: ${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}
KUBE_CONFIG_FILE: ${{ secrets.FEATURE_PREVIEW_KUBE_CONFIG }}
DEPLOYMENT_NAME: ${{ needs.branch_build_setup.outputs.flat_branch_name }}
steps:
- name: Install AWS cli
run: |
@@ -213,54 +122,37 @@ jobs:
./get_helm.sh
- name: App Deploy
run: |
WEB_S3_URL=""
if [ ${{ env.BUILD_WEB }} == true ]; then
WEB_S3_URL=$(aws s3 presign s3://${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}/${{github.sha}}/web.tar.gz --expires-in 3600)
fi
helm --kube-insecure-skip-tls-verify repo add feature-preview ${{ vars.FEATURE_PREVIEW_HELM_CHART_URL }}
SPACE_S3_URL=""
if [ ${{ env.BUILD_SPACE }} == true ]; then
SPACE_S3_URL=$(aws s3 presign s3://${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}/${{github.sha}}/space.tar.gz --expires-in 3600)
fi
APP_NAMESPACE="${{ vars.FEATURE_PREVIEW_NAMESPACE }}"
ADMIN_S3_URL=""
if [ ${{ env.BUILD_ADMIN }} == true ]; then
ADMIN_S3_URL=$(aws s3 presign s3://${{ vars.FEATURE_PREVIEW_AWS_BUCKET }}/${{github.sha}}/admin.tar.gz --expires-in 3600)
fi
helm --kube-insecure-skip-tls-verify uninstall \
${{ env.DEPLOYMENT_NAME }} \
--namespace $APP_NAMESPACE \
--timeout 10m0s \
--wait \
--ignore-not-found
if [ ${{ env.BUILD_WEB }} == true ] || [ ${{ env.BUILD_SPACE }} == true ] || [ ${{ env.BUILD_ADMIN }} == true ]; then
METADATA=$(helm --kube-insecure-skip-tls-verify upgrade \
--install=true \
--namespace $APP_NAMESPACE \
--set dockerhub.loginid=${{ secrets.DOCKERHUB_USERNAME }} \
--set dockerhub.password=${{ secrets.DOCKERHUB_TOKEN_RO}} \
--set config.feature_branch=${{ env.DEPLOYMENT_NAME }} \
--set ingress.primaryDomain=${{vars.FEATURE_PREVIEW_PRIMARY_DOMAIN || 'feature.plane.tools' }} \
--set ingress.tls_secret=${{vars.FEATURE_PREVIEW_INGRESS_TLS_SECRET || '' }} \
--output json \
--timeout 10m0s \
--wait \
${{ env.DEPLOYMENT_NAME }} feature-preview/${{ vars.FEATURE_PREVIEW_HELM_CHART_NAME }} )
helm --kube-insecure-skip-tls-verify repo add feature-preview ${{ vars.FEATURE_PREVIEW_HELM_CHART_URL }}
APP_NAME=$(echo $METADATA | jq -r '.name')
APP_NAMESPACE="${{ vars.FEATURE_PREVIEW_NAMESPACE }}"
DEPLOY_SCRIPT_URL="${{ vars.FEATURE_PREVIEW_DEPLOY_SCRIPT_URL }}"
INGRESS_HOSTNAME=$(kubectl get ingress -n $APP_NAMESPACE --insecure-skip-tls-verify \
-o jsonpath='{.items[?(@.metadata.annotations.meta\.helm\.sh\/release-name=="'$APP_NAME'")]}' | \
jq -r '.spec.rules[0].host')
METADATA=$(helm --kube-insecure-skip-tls-verify install feature-preview/${{ vars.FEATURE_PREVIEW_HELM_CHART_NAME }} \
--generate-name \
--namespace $APP_NAMESPACE \
--set ingress.primaryDomain=${{vars.FEATURE_PREVIEW_PRIMARY_DOMAIN || 'feature.plane.tools' }} \
--set web.image=${{vars.FEATURE_PREVIEW_DOCKER_BASE}} \
--set web.enabled=${{ env.BUILD_WEB || false }} \
--set web.artifact_url=$WEB_S3_URL \
--set space.image=${{vars.FEATURE_PREVIEW_DOCKER_BASE}} \
--set space.enabled=${{ env.BUILD_SPACE || false }} \
--set space.artifact_url=$SPACE_S3_URL \
--set admin.image=${{vars.FEATURE_PREVIEW_DOCKER_BASE}} \
--set admin.enabled=${{ env.BUILD_ADMIN || false }} \
--set admin.artifact_url=$ADMIN_S3_URL \
--set shared_config.deploy_script_url=$DEPLOY_SCRIPT_URL \
--set shared_config.api_base_url=${{vars.FEATURE_PREVIEW_NEXT_PUBLIC_API_BASE_URL}} \
--output json \
--timeout 1000s)
APP_NAME=$(echo $METADATA | jq -r '.name')
INGRESS_HOSTNAME=$(kubectl get ingress -n feature-builds --insecure-skip-tls-verify \
-o jsonpath='{.items[?(@.metadata.annotations.meta\.helm\.sh\/release-name=="'$APP_NAME'")]}' | \
jq -r '.spec.rules[0].host')
echo "****************************************"
echo "APP NAME ::: $APP_NAME"
echo "INGRESS HOSTNAME ::: $INGRESS_HOSTNAME"
echo "****************************************"
fi
echo "****************************************"
echo "APP NAME ::: $APP_NAME"
echo "INGRESS HOSTNAME ::: $INGRESS_HOSTNAME"
echo "****************************************"


@@ -1,3 +1,5 @@
ARG BASE_TAG=develop
ARG BUILD_TYPE=full
# *****************************************************************************
# STAGE 1: Build the project
# *****************************************************************************
@@ -5,7 +7,6 @@ FROM node:18-alpine AS builder
RUN apk add --no-cache libc6-compat
# Set working directory
WORKDIR /app
ENV NEXT_PUBLIC_API_BASE_URL=http://NEXT_PUBLIC_API_BASE_URL_PLACEHOLDER
RUN yarn global add turbo
COPY . .
@@ -46,16 +47,18 @@ ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL
ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ENV NEXT_TELEMETRY_DISABLED 1
ENV TURBO_TELEMETRY_DISABLED 1
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
RUN yarn turbo run build
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1
RUN yarn turbo run build --filter=web --filter=space --filter=admin
# *****************************************************************************
# STAGE 3: Copy the project and start it
# *****************************************************************************
# FROM makeplane/plane-aio-base AS runner
FROM makeplane/plane-aio-base:develop AS runner
FROM makeplane/plane-aio-base:${BUILD_TYPE}-${BASE_TAG} AS runner
WORKDIR /app
@@ -63,17 +66,14 @@ SHELL [ "/bin/bash", "-c" ]
# PYTHON APPLICATION SETUP
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
COPY apiserver/requirements.txt ./api/
COPY apiserver/requirements ./api/requirements
RUN python3.12 -m venv /app/venv && \
source /app/venv/bin/activate && \
/app/venv/bin/pip install --upgrade pip && \
/app/venv/bin/pip install -r ./api/requirements.txt --compile --no-cache-dir
RUN pip install -r ./api/requirements.txt --compile --no-cache-dir
# Add in Django deps and generate Django's static files
COPY apiserver/manage.py ./api/manage.py
@@ -87,7 +87,6 @@ RUN chmod +x ./api/bin/*
RUN chmod -R 777 ./api/
# NEXTJS BUILDS
COPY --from=installer /app/web/next.config.js ./web/
COPY --from=installer /app/web/package.json ./web/
COPY --from=installer /app/web/.next/standalone ./web
@@ -124,26 +123,63 @@ ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
ENV NEXT_TELEMETRY_DISABLED 1
ENV TURBO_TELEMETRY_DISABLED 1
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1
COPY aio/supervisord.conf /app/supervisord.conf
ARG BUILD_TYPE=full
ENV BUILD_TYPE=$BUILD_TYPE
COPY aio/aio.sh /app/aio.sh
RUN chmod +x /app/aio.sh
COPY aio/supervisord-${BUILD_TYPE}-base /app/supervisord.conf
COPY aio/supervisord-app /app/supervisord-app
RUN cat /app/supervisord-app >> /app/supervisord.conf && \
rm /app/supervisord-app
COPY ./aio/nginx.conf /etc/nginx/nginx.conf.template
# pg-setup.sh (copied below) initializes PostgreSQL only when the build type is full
COPY aio/postgresql.conf /etc/postgresql/postgresql.conf
COPY aio/pg-setup.sh /app/pg-setup.sh
RUN chmod +x /app/pg-setup.sh
COPY deploy/selfhost/variables.env /app/plane.env
# *****************************************************************************
# APPLICATION ENVIRONMENT SETTINGS
# *****************************************************************************
ENV APP_DOMAIN=localhost
# NGINX Conf Copy
COPY ./aio/nginx.conf.aio /etc/nginx/nginx.conf.template
COPY ./nginx/env.sh /app/nginx-start.sh
RUN chmod +x /app/nginx-start.sh
ENV WEB_URL=http://${APP_DOMAIN}
ENV DEBUG=0
ENV SENTRY_DSN=
ENV SENTRY_ENVIRONMENT=production
ENV CORS_ALLOWED_ORIGINS=http://${APP_DOMAIN},https://${APP_DOMAIN}
# Secret Key
ENV SECRET_KEY=60gp0byfz2dvffa45cxl20p1scy9xbpf6d8c5y0geejgkyp1b5
# Gunicorn Workers
ENV GUNICORN_WORKERS=1
RUN ./pg-setup.sh
ENV POSTGRES_USER="plane"
ENV POSTGRES_PASSWORD="plane"
ENV POSTGRES_DB="plane"
ENV POSTGRES_HOST="localhost"
ENV POSTGRES_PORT="5432"
ENV DATABASE_URL="postgresql://plane:plane@localhost:5432/plane"
VOLUME [ "/app/data/minio/uploads", "/var/lib/postgresql/data" ]
ENV REDIS_HOST="localhost"
ENV REDIS_PORT="6379"
ENV REDIS_URL="redis://localhost:6379"
ENV USE_MINIO="1"
ENV AWS_REGION=""
ENV AWS_ACCESS_KEY_ID="access-key"
ENV AWS_SECRET_ACCESS_KEY="secret-key"
ENV AWS_S3_ENDPOINT_URL="http://localhost:9000"
ENV AWS_S3_BUCKET_NAME="uploads"
ENV MINIO_ROOT_USER="access-key"
ENV MINIO_ROOT_PASSWORD="secret-key"
ENV BUCKET_NAME="uploads"
ENV FILE_SIZE_LIMIT="5242880"
# *****************************************************************************
RUN /app/pg-setup.sh
CMD ["/usr/bin/supervisord", "-c", "/app/supervisord.conf"]


@@ -1,19 +1,28 @@
FROM --platform=$BUILDPLATFORM tonistiigi/binfmt as binfmt
FROM --platform=$BUILDPLATFORM tonistiigi/binfmt AS binfmt
FROM debian:12-slim
FROM python:3.12-slim
# Set environment variables to non-interactive for apt
ENV DEBIAN_FRONTEND=noninteractive
ENV BUILD_TYPE=full
SHELL [ "/bin/bash", "-c" ]
WORKDIR /app
RUN mkdir -p /app/{data,logs} && \
mkdir -p /app/data/{redis,pg,minio,nginx} && \
mkdir -p /app/logs/{access,error} && \
mkdir -p /etc/supervisor/conf.d
# Update the package list and install prerequisites
RUN apt-get update && \
apt-get install -y \
gnupg2 curl ca-certificates lsb-release software-properties-common \
build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev \
libsqlite3-dev wget llvm libncurses5-dev libncursesw5-dev xz-utils \
tk-dev libffi-dev liblzma-dev supervisor nginx nano vim ncdu
tk-dev libffi-dev liblzma-dev supervisor nginx nano vim ncdu \
sudo lsof net-tools libpq-dev procps gettext
# Install Redis 7.2
RUN echo "deb http://deb.debian.org/debian $(lsb_release -cs)-backports main" > /etc/apt/sources.list.d/backports.list && \
@@ -23,13 +32,15 @@ RUN echo "deb http://deb.debian.org/debian $(lsb_release -cs)-backports main" >
apt-get install -y redis-server
# Install PostgreSQL 15
ENV POSTGRES_VERSION 15
ENV POSTGRES_VERSION=15
RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /usr/share/keyrings/pgdg-archive-keyring.gpg && \
echo "deb [signed-by=/usr/share/keyrings/pgdg-archive-keyring.gpg] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list && \
apt-get update && \
apt-get install -y postgresql-$POSTGRES_VERSION postgresql-client-$POSTGRES_VERSION && \
mkdir -p /var/lib/postgresql/data && \
chown -R postgres:postgres /var/lib/postgresql
COPY postgresql.conf /etc/postgresql/postgresql.conf
RUN sudo -u postgres /usr/lib/postgresql/$POSTGRES_VERSION/bin/initdb -D /var/lib/postgresql/data
# Install MinIO
ARG TARGETARCH
@@ -42,51 +53,21 @@ RUN if [ "$TARGETARCH" = "amd64" ]; then \
fi && \
chmod +x /usr/local/bin/minio
# Install Node.js 18
RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - && \
apt-get install -y nodejs
# Install Python 3.12 from source
RUN cd /usr/src && \
wget https://www.python.org/ftp/python/3.12.0/Python-3.12.0.tgz && \
tar xzf Python-3.12.0.tgz && \
cd Python-3.12.0 && \
./configure --enable-optimizations && \
make altinstall && \
rm -f /usr/src/Python-3.12.0.tgz
RUN python3.12 -m pip install --upgrade pip
RUN echo "alias python=/usr/local/bin/python3.12" >> ~/.bashrc && \
echo "alias pip=/usr/local/bin/pip3.12" >> ~/.bashrc
# Clean up
RUN apt-get clean && \
rm -rf /var/lib/apt/lists/* /usr/src/Python-3.12.0
WORKDIR /app
RUN mkdir -p /app/{data,logs} && \
mkdir -p /app/data/{redis,pg,minio,nginx} && \
mkdir -p /app/logs/{access,error} && \
mkdir -p /etc/supervisor/conf.d
apt-get install -y nodejs && \
python -m pip install --upgrade pip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Create Supervisor configuration file
COPY supervisord.base /app/supervisord.conf
RUN apt-get update && \
apt-get install -y sudo lsof net-tools libpq-dev procps gettext && \
apt-get clean
RUN sudo -u postgres /usr/lib/postgresql/$POSTGRES_VERSION/bin/initdb -D /var/lib/postgresql/data
COPY postgresql.conf /etc/postgresql/postgresql.conf
RUN echo "alias python=/usr/local/bin/python3.12" >> ~/.bashrc && \
echo "alias pip=/usr/local/bin/pip3.12" >> ~/.bashrc
COPY supervisord-full-base /app/supervisord.conf
COPY nginx.conf /etc/nginx/nginx.conf.template
COPY env.sh /app/nginx-start.sh
RUN chmod +x /app/nginx-start.sh
# Expose ports for Redis, PostgreSQL, and MinIO
EXPOSE 6379 5432 9000 80
EXPOSE 6379 5432 9000 80 443
# Start Supervisor
CMD ["/usr/bin/supervisord", "-c", "/app/supervisord.conf"]

aio/Dockerfile-base-slim (new file, 45 lines)

@@ -0,0 +1,45 @@
FROM --platform=$BUILDPLATFORM tonistiigi/binfmt AS binfmt
FROM python:3.12-slim
# Set environment variables to non-interactive for apt
ENV DEBIAN_FRONTEND=noninteractive
ENV BUILD_TYPE=slim
SHELL [ "/bin/bash", "-c" ]
WORKDIR /app
RUN mkdir -p /app/{data,logs} && \
mkdir -p /app/data/nginx && \
mkdir -p /app/logs/{access,error} && \
mkdir -p /etc/supervisor/conf.d
# Update the package list and install prerequisites
RUN apt-get update && \
apt-get install -y \
gnupg2 curl ca-certificates lsb-release software-properties-common \
build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev \
libsqlite3-dev wget llvm libncurses5-dev libncursesw5-dev xz-utils \
tk-dev libffi-dev liblzma-dev supervisor nginx nano vim ncdu \
sudo lsof net-tools libpq-dev procps gettext
# Install Node.js 18
RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - && \
apt-get install -y nodejs
RUN python -m pip install --upgrade pip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Create Supervisor configuration file
COPY supervisord-slim-base /app/supervisord.conf
COPY nginx.conf /etc/nginx/nginx.conf.template
COPY env.sh /app/nginx-start.sh
RUN chmod +x /app/nginx-start.sh
# Expose ports for NGINX (HTTP and HTTPS)
EXPOSE 80 443
# Start Supervisor
CMD ["/usr/bin/supervisord", "-c", "/app/supervisord.conf"]


@@ -1,30 +0,0 @@
#!/bin/bash
set -e
if [ "$1" = 'api' ]; then
source /app/venv/bin/activate
cd /app/api
exec ./bin/docker-entrypoint-api.sh
elif [ "$1" = 'worker' ]; then
source /app/venv/bin/activate
cd /app/api
exec ./bin/docker-entrypoint-worker.sh
elif [ "$1" = 'beat' ]; then
source /app/venv/bin/activate
cd /app/api
exec ./bin/docker-entrypoint-beat.sh
elif [ "$1" = 'migrator' ]; then
source /app/venv/bin/activate
cd /app/api
exec ./bin/docker-entrypoint-migrator.sh
elif [ "$1" = 'web' ]; then
node /app/web/web/server.js
elif [ "$1" = 'space' ]; then
node /app/space/space/server.js
elif [ "$1" = 'admin' ]; then
node /app/admin/admin/server.js
else
echo "Command not found"
exit 1
fi

aio/env.sh (new file, 7 lines)

@@ -0,0 +1,7 @@
#!/bin/bash
export dollar="$"
export http_upgrade="http_upgrade"
export scheme="scheme"
envsubst < /etc/nginx/nginx.conf.template > /etc/nginx/nginx.conf
exec nginx -g 'daemon off;'
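
Exporting dollar, http_upgrade, and scheme lets envsubst render the template while passing NGINX's own $-variables through untouched; a minimal sketch of the same substitution:

export dollar='$'
echo 'proxy_set_header Upgrade ${dollar}http_upgrade;' | envsubst
# prints: proxy_set_header Upgrade $http_upgrade;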


@@ -37,7 +36,6 @@ http {
proxy_pass http://localhost:3002/spaces/;
}
location /god-mode/ {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;


@@ -1,14 +1,14 @@
#!/bin/bash
if [ "$BUILD_TYPE" == "full" ]; then
# Variables
set -o allexport
source plane.env set
set +o allexport
export PGHOST=localhost
export PGHOST=localhost
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_ctl" -D /var/lib/postgresql/data start
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/psql" --command "CREATE USER $POSTGRES_USER WITH SUPERUSER PASSWORD '$POSTGRES_PASSWORD';" && \
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/createdb" -O "$POSTGRES_USER" "$POSTGRES_DB" && \
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/psql" --command "GRANT ALL PRIVILEGES ON DATABASE $POSTGRES_DB TO $POSTGRES_USER;" && \
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_ctl" -D /var/lib/postgresql/data stop
fi
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_ctl" -D /var/lib/postgresql/data start
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/psql" --command "CREATE USER $POSTGRES_USER WITH SUPERUSER PASSWORD '$POSTGRES_PASSWORD';" && \
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/createdb" -O "$POSTGRES_USER" "$POSTGRES_DB" && \
sudo -u postgres "/usr/lib/postgresql/${POSTGRES_VERSION}/bin/pg_ctl" -D /var/lib/postgresql/data stop
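
Once pg-setup.sh has run during the image build, the role and database it created can be sanity-checked from inside the container; a sketch, assuming the default plane credentials from Dockerfile-app:

PGPASSWORD=plane psql -h localhost -U plane -d plane -c 'SELECT 1;'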


@@ -1,12 +1,815 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days
# Allow connections from any IP address
listen_addresses = '*'
# Set the maximum number of connections
max_connections = 100
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# Set the shared buffers size
shared_buffers = 128MB
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
# Other custom configurations can be added here
data_directory = '/var/lib/postgresql/data' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/15/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/15/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/15-main.pid' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
port = 5432 # (change requires restart)
max_connections = 200 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - TCP settings -
# see "man tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default
#client_connection_check_interval = 0 # time between checks for client
# disconnection while running queries;
# 0 for never
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off
# - SSL -
ssl = on
#ssl_ca_file = ''
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem'
#ssl_crl_file = ''
#ssl_crl_dir = ''
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1.2'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 256MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#huge_page_size = 0 # zero for system default
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#hash_mem_multiplier = 2.0 # 1-1000.0 multiplier on hash table work_mem
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#logical_decoding_work_mem = 64MB # min 64kB
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is usually the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)
#min_dynamic_shared_memory = 0MB # (change requires restart)
# - Disk -
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
# (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 2 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
# - Asynchronous Behavior -
#backend_flush_after = 0 # measured in pages, 0 disables
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#maintenance_io_concurrency = 10 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_workers_per_gather = 2 # limited by max_parallel_workers
#max_parallel_maintenance_workers = 2 # limited by max_parallel_workers
#max_parallel_workers = 8 # number of max_worker_processes that
# can be used in parallel operations
#parallel_leader_participation = on
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_compression = off # enables compression of full-page writes;
# off, pglz, lz4, zstd, or on
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
#wal_skip_threshold = 2MB
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_timeout = 5min # range 30s-1d
#checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables
max_wal_size = 1GB
min_wal_size = 80MB
# - Prefetching during recovery -
#recovery_prefetch = try # prefetch pages referenced in the WAL?
#wal_decode_buffer_size = 512kB # lookahead window used for prefetching
# (change requires restart)
# - Archiving -
#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_library = '' # library to use to archive a logfile segment
# (empty string indicates archive_command should
# be used)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables
# - Archive Recovery -
# These are only used in recovery mode.
#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery
# - Recovery Target -
# Set these only when performing a targeted recovery.
#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)
#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------
# - Sending Servers -
# Set these on the primary and on any standby that will send replication data.
#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
# - Primary Server -
# These settings are ignored on a standby server.
#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
# - Standby Servers -
# These settings are ignored on a primary server.
#primary_conninfo = '' # connection string to sending server
#primary_slot_name = '' # replication slot on sending server
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_create_temp_slot = off # create temp slot if primary_slot_name
# is not set
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from primary
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery
# - Subscribers -
# These settings are ignored on a publisher.
#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_async_append = on
#enable_bitmapscan = on
#enable_gathermerge = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_incremental_sort = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_memoize = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_parallel_hash = on
#enable_partition_pruning = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB
#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0
# - Other Planner Options -
#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#jit = on # allow JIT compilation
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan
#recursive_worktable_factor = 10.0 # range 0.001-1000000
#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, jsonlog, syslog, and
# eventlog, depending on platform.
# csvlog and jsonlog require
# logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr, jsonlog,
# and csvlog into log files. Required
# to be on for csvlogs and jsonlogs.
# (change requires restart)
# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on
# This is only relevant when logging to eventlog (Windows):
# (change requires restart)
#event_source = 'PostgreSQL'
# - When to Log -
#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds
#log_min_duration_sample = -1 # -1 is disabled, 0 logs a sample of statements
# and their durations, > 0 logs only a sample of
# statements running at least this number
# of milliseconds;
# sample fraction is determined by log_statement_sample_rate
#log_statement_sample_rate = 1.0 # fraction of logged statements exceeding
# log_min_duration_sample to be logged;
# 1.0 logs all such statements, 0.0 never logs
#log_transaction_sample_rate = 0.0 # fraction of transactions whose statements
# are logged regardless of their duration; 1.0 logs all
# statements from all transactions, 0.0 never logs
#log_startup_progress_interval = 10s # Time between progress updates for
# long-running startup operations.
# 0 disables the feature, > 0 indicates
# the interval in milliseconds.
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_autovacuum_min_duration = 10min # log autovacuum activity;
# -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#log_checkpoints = on
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%m [%p] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %b = backend type
# %p = process ID
# %P = process ID of parallel group leader
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %n = timestamp with milliseconds (as a Unix epoch)
# %Q = query ID (0 if none or not computed)
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_parameter_max_length_on_error = 0 # when logging an error, limit logged
# bind-parameter values to N bytes;
# -1 means print in full, 0 disables
#log_statement = 'none' # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'Etc/UTC'
#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------
cluster_name = '15/main' # added to process titles if nonempty
# (change requires restart)
#update_process_title = on
#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------
# - Cumulative Query and Index Statistics -
#track_activities = on
#track_activity_query_size = 1024 # (change requires restart)
#track_counts = on
#track_io_timing = off
#track_wal_io_timing = off
#track_functions = none # none, pl, all
#stats_fetch_consistency = cache
# - Monitoring -
#compute_query_id = auto
#log_statement_stats = off
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_vacuum_insert_threshold = 1000 # min number of row inserts
# before vacuum; -1 disables insert
# vacuums
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_vacuum_insert_scale_factor = 0.2 # fraction of inserts over table
# size before insert vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
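# Worked example with the default values above: a table holding 10,000
# live rows is autovacuumed once updates/deletes exceed
# 50 + 0.2 * 10,000 = 2,050 rows, and re-analyzed after
# 50 + 0.1 * 10,000 = 1,050 changed rows.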
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#search_path = '"$user", public' # schema names
#row_security = on
#default_table_access_method = 'heap'
#default_tablespace = '' # a tablespace name, '' uses the default
#default_toast_compression = 'pglz' # 'pglz' or 'lz4'
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#idle_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_table_age = 150000000
#vacuum_freeze_min_age = 50000000
#vacuum_failsafe_age = 1600000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_failsafe_age = 1600000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_pending_list_limit = 4MB
# - Locale and Formatting -
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C.UTF-8' # locale for system error message
# strings
lc_monetary = 'C.UTF-8' # locale for monetary formatting
lc_numeric = 'C.UTF-8' # locale for number formatting
lc_time = 'C.UTF-8' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Shared Library Preloading -
#local_preload_libraries = ''
#session_preload_libraries = ''
#shared_preload_libraries = '' # (change requires restart)
#jit_provider = 'llvmjit' # JIT library to use
# - Other Defaults -
#dynamic_library_path = '$libdir'
#extension_destdir = '' # prepend path when loading extensions
# and shared objects (added by Debian)
#gin_fuzzy_search_limit = 0
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2 # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2 # min 0
#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------
#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?
#data_sync_retry = off # retry or panic on failure to fsync
# data?
# (change requires restart)
#recovery_init_sync_method = fsync # fsync, syncfs (Linux 5.8+)
#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------
# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.
# include_dir = 'conf.d' # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...' # include file only if it exists
#include = '...' # include file
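# Illustrative drop-in override (hypothetical path and values): a file at
# conf.d/99-tuning.conf containing
#   shared_buffers = '512MB'
#   work_mem = '16MB'
# would be picked up by an active include_dir = 'conf.d'; when a parameter
# is set in more than one place, the last value read wins.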
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
# Add settings for extensions here
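# Example extension setting (hypothetical; requires the extension to be
# preloaded via shared_preload_libraries above):
#   pg_stat_statements.max = 10000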

aio/supervisord-app (new file, 71 lines)

@@ -0,0 +1,71 @@
[program:web]
command=node /app/web/web/server.js
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
environment=PORT=3001,HOSTNAME=0.0.0.0
[program:space]
command=node /app/space/space/server.js
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
environment=PORT=3002,HOSTNAME=0.0.0.0
[program:admin]
command=node /app/admin/admin/server.js
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
environment=PORT=3003,HOSTNAME=0.0.0.0
[program:migrator]
directory=/app/api
command=sh -c "./bin/docker-entrypoint-migrator.sh"
autostart=true
autorestart=false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:api]
directory=/app/api
command=sh -c "./bin/docker-entrypoint-api.sh"
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:worker]
directory=/app/api
command=sh -c "./bin/docker-entrypoint-worker.sh"
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:beat]
directory=/app/api
command=sh -c "./bin/docker-entrypoint-beat.sh"
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
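
With the [program:*] entries above loaded by supervisord, individual services can be inspected or restarted without bouncing the whole container — a minimal sketch, assuming the config is installed at /app/supervisord.conf (path hypothetical):

    supervisorctl -c /app/supervisord.conf status
    supervisorctl -c /app/supervisord.conf restart api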

(modified file)

@@ -2,7 +2,7 @@
user=root
nodaemon=true
stderr_logfile=/app/logs/error/supervisor.err.log
-stdout_logfile=/app/logs/access/supervisor.out.log
+stdout_logfile=/app/logs/access/supervisor.log
[program:redis]
directory=/app/data/redis
@@ -10,15 +10,15 @@ command=redis-server
autostart=true
autorestart=true
stderr_logfile=/app/logs/error/redis.err.log
-stdout_logfile=/app/logs/access/redis.out.log
+stdout_logfile=/app/logs/access/redis.log
[program:postgresql]
user=postgres
-command=/usr/lib/postgresql/15/bin/postgres --config-file=/etc/postgresql/15/main/postgresql.conf
+command=/usr/lib/postgresql/15/bin/postgres --config-file=/etc/postgresql/postgresql.conf
autostart=true
autorestart=true
stderr_logfile=/app/logs/error/postgresql.err.log
-stdout_logfile=/app/logs/access/postgresql.out.log
+stdout_logfile=/app/logs/access/postgresql.log
[program:minio]
directory=/app/data/minio
@@ -26,12 +26,13 @@ command=minio server /app/data/minio
autostart=true
autorestart=true
stderr_logfile=/app/logs/error/minio.err.log
-stdout_logfile=/app/logs/access/minio.out.log
+stdout_logfile=/app/logs/access/minio.log
[program:nginx]
directory=/app/data/nginx
-command=/usr/sbin/nginx -g 'daemon off;'
+command=/app/nginx-start.sh
autostart=true
autorestart=true
stderr_logfile=/app/logs/error/nginx.err.log
-stdout_logfile=/app/logs/access/nginx.out.log
+stdout_logfile=/app/logs/access/nginx.log

aio/supervisord-slim-base (new file, 14 lines)

@@ -0,0 +1,14 @@
[supervisord]
user=root
nodaemon=true
stderr_logfile=/app/logs/error/supervisor.err.log
stdout_logfile=/app/logs/access/supervisor.log
[program:nginx]
directory=/app/data/nginx
command=/app/nginx-start.sh
autostart=true
autorestart=true
stderr_logfile=/app/logs/error/nginx.err.log
stdout_logfile=/app/logs/access/nginx.log

(deleted file)

@@ -1,115 +0,0 @@
[supervisord]
user=root
nodaemon=true
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:redis]
directory=/app/data/redis
command=redis-server
autostart=true
autorestart=true
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:postgresql]
user=postgres
command=/usr/lib/postgresql/15/bin/postgres -D /var/lib/postgresql/data --config-file=/etc/postgresql/postgresql.conf
autostart=true
autorestart=true
priority=1
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:minio]
directory=/app/data/minio
command=minio server /app/data/minio
autostart=true
autorestart=true
priority=1
stdout_logfile=/app/logs/access/minio.log
stderr_logfile=/app/logs/error/minio.err.log
[program:nginx]
command=/app/nginx-start.sh
autostart=true
autorestart=true
priority=1
stdout_logfile=/app/logs/access/nginx.log
stderr_logfile=/app/logs/error/nginx.err.log
[program:web]
command=/app/aio.sh web
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
environment=PORT=3001,HOSTNAME=0.0.0.0
[program:space]
command=/app/aio.sh space
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
environment=PORT=3002,HOSTNAME=0.0.0.0
[program:admin]
command=/app/aio.sh admin
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
environment=PORT=3003,HOSTNAME=0.0.0.0
[program:migrator]
command=/app/aio.sh migrator
autostart=true
autorestart=false
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:api]
command=/app/aio.sh api
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:worker]
command=/app/aio.sh worker
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0
[program:beat]
command=/app/aio.sh beat
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stdout
stderr_logfile_maxbytes=0