Compare commits

No commits in common. "main" and "v2.0.0" have entirely different histories.
main...v2.0.0

353 changed files with 5079 additions and 22178 deletions
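Because the two refs share no merge base, the listing below is effectively a direct comparison of the two trees rather than a set of commits on top of a common ancestor. A minimal shell sketch (assuming a local clone with both refs fetched) for confirming this locally:

# Prints no hash and exits non-zero when the two refs have no common ancestor
git merge-base main v2.0.0 || echo "no common ancestor"
# Counts how many commits are reachable only from each side of the comparison
git rev-list --left-right --count main...v2.0.0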

.gitattributes vendored (1 change)

@ -1 +0,0 @@
* text=auto


@ -1,16 +1,15 @@
---
name: 🐛 Bug report - no questions and no support!
about: Help us improving by reporting a bug - this category is not for questions and also not for support! Please use one of the options below for questions and support
labels: 0. Needs triage
name: 🐛 Bug report
about: Help us improving by reporting a bug
labels: bug, 0. Needs triage
---
<!---
- Before submitting a bug report, please read through the documentation available at https://github.com/nextcloud/all-in-one#faq
- Additional documentation is available here: https://github.com/nextcloud/all-in-one/discussions/categories/wiki
- You should also read through existing questions and their answer here: https://github.com/nextcloud/all-in-one/discussions/categories/questions
- Additional threads can be found here: https://help.nextcloud.com/tag/aio
- Existing feature requests are listed here: https://github.com/nextcloud/all-in-one/discussions/categories/ideas
--->
<!--- Please keep this note for other contributors -->
### How to use GitHub
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.
<!--- Please fill out the whole template below -->
### Steps to reproduce
@ -23,17 +22,11 @@ labels: 0. Needs triage
### Actual behavior <!--- Tell us what happens instead -->
### Other information
#### Host OS <!--- (the host OS on which you are trying to install AIO on) -->
### Host OS <!--- (the host OS on which you are trying to install AIO on) -->
#### Output of `sudo docker info`
#### Docker run command or docker-compose file that you used
#### Nextcloud AIO version <!--- (see Nextcloud AIO interface) -->
#### Output of `sudo docker logs nextcloud-aio-mastercontainer`
#### Current channel <!--- (see the channel name in the AIO interface) -->
#### Output of `sudo docker inspect nextcloud-aio-mastercontainer`
#### Output of `sudo docker ps -a`
#### Other valuable info <!--- (like additional logs, screenshots & Co.) -->
#### Other valuable info <!--- (like logs, screenshots & Co.) -->


@ -1,9 +1,15 @@
---
name: 📖 Existing feature/documentation enhancement
about: Suggest an enhancement of an existing feature/documentation - for other types, please use the feature request option below
labels: 0. Needs triage
labels: enhancement, 0. Needs triage
---
<!--- Please keep this note for other contributors -->
### How to use GitHub
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are interested into the same feature.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.
<!--- Please fill out the whole template below -->
### Is your feature request related to a problem? Please describe.
<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->


@ -1,14 +1,14 @@
blank_issues_enabled: false
contact_links:
- name: 📘 Documentation on Nextcloud AIO
url: https://github.com/nextcloud/all-in-one#faq
about: Please read the docs first before submitting any report or request!
- name: ⛑️ Questions and support
url: https://help.nextcloud.com/tag/aio
about: For questions, support and help
- name: 💡 Suggest a new feature or discuss one
url: https://github.com/nextcloud/all-in-one/discussions/categories/ideas
about: For new feature requests and discussion of existing ones
- name: ❓ Questions on AIO
url: https://github.com/nextcloud/all-in-one/discussions/categories/questions
about: For questions regarding AIO
- name: ⛑️ Community Support and Help
url: https://help.nextcloud.com/tag/aio
about: For other types of questions
- name: 💼 Nextcloud Enterprise
url: https://portal.nextcloud.com/
about: If you are a Nextcloud Enterprise customer, or need Professional support, so it can be resolved directly by our dedicated engineers more quickly
about: If you are a Nextcloud Enterprise customer, or need Professional support, so it can be resolved directly by our dedicated engineers more quickly

.github/dependabot.yml vendored (156 changes)

@ -1,62 +1,158 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: ".github/workflows"
directory: "/"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
rebase-strategy: "disabled"
labels:
- 3. to review
- dependencies
cooldown:
default-days: 7
- package-ecosystem: composer
directory: "/php/"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
rebase-strategy: "auto"
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directories:
- "/Containers/alpine"
- "/Containers/apache"
- "/Containers/borgbackup"
- "/Containers/clamav"
- "/Containers/collabora"
- "/Containers/docker-socket-proxy"
- "/Containers/domaincheck"
- "/Containers/fulltextsearch"
- "/Containers/imaginary"
- "/Containers/mastercontainer"
- "/Containers/nextcloud"
- "/Containers/notify-push"
- "/Containers/onlyoffice"
- "/Containers/postgresql"
- "/Containers/redis"
- "/Containers/talk"
- "/Containers/talk-recording"
- "/Containers/watchtower"
- "/Containers/whiteboard"
directory: "/Containers/apache"
schedule:
interval: "daily"
time: "04:00"
time: "12:00"
open-pull-requests-limit: 10
rebase-strategy: "disabled"
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/borgbackup"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/collabora"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/domaincheck"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/mastercontainer"
schedule:
interval: "daily"
time: "12:00"
ignore:
- dependency-name: "php"
update-types: ["version-update:semver-major", "version-update:semver-minor"]
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/nextcloud"
schedule:
interval: "daily"
time: "12:00"
ignore:
- dependency-name: "php"
update-types: ["version-update:semver-major", "version-update:semver-minor"]
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/postgresql"
schedule:
interval: "daily"
time: "12:00"
ignore:
- dependency-name: "postgres"
update-types: ["version-update:semver-major"]
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/redis"
schedule:
interval: "daily"
time: "12:00"
ignore:
- dependency-name: "redis"
update-types: ["version-update:semver-major"]
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/talk"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/watchtower"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/clamav"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/onlyoffice"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/imaginary"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/fulltextsearch"
schedule:
interval: "daily"
time: "12:00"
ignore:
- dependency-name: "elasticsearch"
update-types: ["version-update:semver-major"]
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies


@ -1,5 +0,0 @@
<!--
- 🚨 SECURITY INFO
-
- Before sending a pull request that fixes a security issue please report it via our HackerOne page (https://hackerone.com/nextcloud) following our security policy (https://nextcloud.com/security/). This allows us to coordinate the fix and release without potentially exposing all Nextcloud servers and users in the meantime.
-->

.github/release.yml vendored (14 changes)

@ -1,14 +0,0 @@
changelog:
categories:
- title: 🏕 New features and other improvements
labels:
- enhancement
- title: 🐞 Fixed bugs
labels:
- bug
- title: 👒 Updated dependencies
labels:
- dependencies
- title: 📄 Improved documentation
labels:
- documentation


@ -1,20 +0,0 @@
name: 'Codespell'
on:
pull_request:
push:
branches:
- main
jobs:
codespell:
name: Check spelling
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Check spelling
uses: codespell-project/actions-codespell@8f01853be192eb0f849a5c7d721450e7a467c579 # v2
with:
check_filenames: true
check_hidden: true


@ -1,29 +0,0 @@
name: collabora-update
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
jobs:
collabora-update:
name: update collabora
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Run collabora-profile-update
run: |
rm -f php/cool-seccomp-profile.json
wget https://raw.githubusercontent.com/CollaboraOnline/online/refs/heads/main/docker/cool-seccomp-profile.json
mv cool-seccomp-profile.json php/
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
with:
commit-message: collabora-seccomp-update automated change
signoff: true
title: collabora seccomp update
body: Automated collabora seccomp profile update
labels: dependencies, 3. to review
milestone: next
branch: collabora-seccomp-update

.github/workflows/command-rebase.yml vendored Normal file (51 changes)

@ -0,0 +1,51 @@
# This workflow is provided via the organization template repository
#
# https://github.com/nextcloud/.github
# https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization
name: Rebase command
on:
issue_comment:
types: created
permissions:
contents: read
jobs:
rebase:
runs-on: ubuntu-latest
permissions:
contents: none
# On pull requests and if the comment starts with `/rebase`
if: github.event.issue.pull_request != '' && startsWith(github.event.comment.body, '/rebase')
steps:
- name: Add reaction on start
uses: peter-evans/create-or-update-comment@v2
with:
token: ${{ secrets.COMMAND_BOT_PAT }}
repository: ${{ github.event.repository.full_name }}
comment-id: ${{ github.event.comment.id }}
reaction-type: "+1"
- name: Checkout the latest code
uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.COMMAND_BOT_PAT }}
- name: Automatic Rebase
uses: cirrus-actions/rebase@1.7
env:
GITHUB_TOKEN: ${{ secrets.COMMAND_BOT_PAT }}
- name: Add reaction on failure
uses: peter-evans/create-or-update-comment@v2
if: failure()
with:
token: ${{ secrets.COMMAND_BOT_PAT }}
repository: ${{ github.event.repository.full_name }}
comment-id: ${{ github.event.comment.id }}
reaction-type: "-1"


@ -1,37 +0,0 @@
name: Validate community containers
on:
pull_request:
paths:
- 'community-containers/**'
push:
branches:
- main
paths:
- 'community-containers/**'
jobs:
validator-community-containers:
name: Validate community containers
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Validate structure
run: |
CONTAINERS="$(find ./community-containers -mindepth 1 -maxdepth 1 -type d)"
mapfile -t CONTAINERS <<< "$CONTAINERS"
for container in "${CONTAINERS[@]}"; do
container="$(echo "$container" | sed 's|./community-containers/||')"
if ! [ -f ./community-containers/"$container"/"$container.json" ]; then
echo ".json file must be named like its parent folder $container"
FAIL=1
fi
if ! [ -f ./community-containers/"$container"/readme.md ]; then
echo "There must be a readme.md file in the folder!"
FAIL=1
fi
if [ -n "$FAIL" ]; then
exit 1
fi
done


@ -0,0 +1,54 @@
name: Create Psalm Container
on:
workflow_dispatch:
schedule:
- cron: '5 4 * * *'
jobs:
push_to_registry:
runs-on: ubuntu-latest
name: Create Psalm Container
permissions:
packages: write
contents: read
steps:
- name: Check out the repo
run: |
git clone https://github.com/psalm/psalm-github-actions.git
- name: Modify the Dockerfile
run: |
set -x
sed -i 's|FROM php:7.4-alpine|FROM php:8.0-alpine|' "psalm-github-actions/Dockerfile"
cat << APCU >> "psalm-github-actions/Dockerfile"
RUN mkdir -p /usr/src/php/ext/apcu && \
curl -fsSL https://pecl.php.net/get/apcu | tar xvz -C "/usr/src/php/ext/apcu" --strip 1 && \
docker-php-ext-install apcu
APCU
- name: Log in to GitHub Docker Registry
uses: docker/login-action@v2
with:
registry: docker.pkg.github.com
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Log in to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build container image
uses: docker/build-push-action@v3
with:
push: true
context: 'psalm-github-actions'
file: 'psalm-github-actions/Dockerfile'
tags: |
ghcr.io/nextcloud/all-in-one-psalm:latest


@ -1,7 +1,6 @@
name: dependency-updates
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
@ -10,27 +9,28 @@ jobs:
name: Run dependency update script
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2
- uses: actions/checkout@v3
- uses: nanasess/setup-php@master
with:
php-version: 8.4
extensions: apcu
php-version: '8.0'
- name: Run dependency update script
run: |
set -x
curl -sS https://getcomposer.org/installer | php
mv composer.phar /usr/local/bin/composer
chmod +x /usr/local/bin/composer
cd ./php
composer update --with-all-dependencies
# Disable dependency updates for now
# set +e
# ALL_LINES="$(composer outdated | grep -v "^$\|Direct dependencies\|Everything up to date\|Transitive dependencies")"
# set -e
# while [ -n "$ALL_LINES" ]; do
# CURRENT_LINE="$(echo "$ALL_LINES" | head -1)"
# composer require "$(echo "$CURRENT_LINE" | awk '{print $1}')" "^$(echo "$CURRENT_LINE" | awk '{print $4}')" --with-all-dependencies
# ALL_LINES="$(echo "$ALL_LINES" | sed '1d')"
# done
# echo "outdated dependencies:
# $(composer outdated)"
composer update
set +e
ALL_LINES="$(composer outdated | grep -v "psr/container\|^$\|Direct dependencies\|Everything up to date\|Transitive dependencies")"
set -e
while [ -n "$ALL_LINES" ]; do
CURRENT_LINE="$(echo "$ALL_LINES" | head -1)"
composer require "$(echo "$CURRENT_LINE" | awk '{print $1}')" "^$(echo "$CURRENT_LINE" | awk '{print $4}')"
ALL_LINES="$(echo "$ALL_LINES" | sed '1d')"
done
echo "outdated dependencies:
$(composer outdated)"
- name: Update apcu
run: |
# APCU
@ -44,12 +44,12 @@ jobs:
)"
sed -i "s|pecl install APCu.*\;|pecl install APCu-$apcu_version\;|" ./Containers/mastercontainer/Dockerfile
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
uses: peter-evans/create-pull-request@v4
with:
commit-message: php dependency updates
commit-message: dependency updates
signoff: true
title: PHP dependency updates
body: Automated php dependency updates since dependabot does not support grouped updates
labels: dependencies, 3. to review
title: Dependency updates
body: Automated dependency updates since dependabot does not support grouped updates
labels: dependencies, enhancement
milestone: next
branch: aio-dependency-update


@ -1,46 +0,0 @@
name: Docker Lint
on:
pull_request:
paths:
- 'Containers/**'
push:
branches:
- main
paths:
- 'Containers/**'
permissions:
contents: read
concurrency:
group: docker-lint-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
docker-lint:
runs-on: ubuntu-latest
name: docker-lint
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Install hadolint
run: |
sudo wget https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -O /usr/bin/hadolint
sudo chmod +x /usr/bin/hadolint
- name: run lint
run: |
DOCKERFILES="$(find ./Containers -name Dockerfile)"
mapfile -t DOCKERFILES <<< "$DOCKERFILES"
for file in "${DOCKERFILES[@]}"; do
# DL3018 warning: Pin versions in apk add. Instead of `apk add <package>` use `apk add <package>=<version>`
# DL4006 warning: Set the SHELL option -o pipefail before RUN with a pipe in it. If you are using /bin/sh in an alpine image or if your shell is symlinked to busybox then consider explicitly setting your SHELL to /bin/ash, or disable this check
hadolint "$file" --ignore DL3018 --ignore DL4006 | tee -a ./hadolint.log
done
if grep -q "DL[0-9]\+\|SC[0-9]\+" ./hadolint.log; then
exit 1
fi


@ -1,50 +0,0 @@
name: Block if prerelease is present
on:
pull_request:
permissions:
contents: read
jobs:
check-latest-release:
runs-on: ubuntu-latest
steps:
- name: "Check latest published release isn't a prerelease"
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v6
with:
script: |
const tags = await github.rest.repos.listTags({
owner: context.repo.owner,
repo: context.repo.repo,
per_page: 1
});
if (!tags.data || tags.data.length === 0) {
core.info('No tags found for this repository; skipping prerelease check.');
return;
}
const latestTag = tags.data[0].name;
core.info(`Latest tag found: ${latestTag}`);
try {
const { data } = await github.rest.repos.getReleaseByTag({
owner: context.repo.owner,
repo: context.repo.repo,
tag: latestTag
});
if (data.prerelease) {
core.setFailed(`Release for tag ${latestTag} (${data.tag_name}) is a prerelease. Blocking merges to main as we need to wait for the prerelease to become stable.`);
} else {
core.info(`Release for tag ${latestTag} (${data.tag_name}) is not a prerelease.`);
}
} catch (err) {
if (err.status === 404) {
core.info(`No release found for tag ${latestTag}; skipping prerelease check.`);
} else {
throw err;
}
}


@ -1,51 +0,0 @@
name: Helm Chart Releaser
on:
push:
branches:
- main
paths:
- 'nextcloud-aio-helm-chart/**'
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Turnstyle
uses: softprops/turnstyle@e565d2d86403c5d23533937e95980570545e5586 # v2
with:
continue-after-seconds: 180
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Fetch history
run: git fetch --prune --unshallow
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
# See https://github.com/helm/chart-releaser-action/issues/6
- name: Set up Helm
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4
with:
version: v3.6.3
- name: Run Helm Lint
run: |
helm lint ./nextcloud-aio-helm-chart
- name: Run chart-releaser
uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f # v1.7.0
with:
mark_as_latest: false
charts_dir: .
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
CR_RELEASE_NAME_TEMPLATE: "helm-chart-{{ .Version }}"
CR_SKIP_EXISTING: true


@ -1,33 +0,0 @@
name: imaginary-update
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
jobs:
run_update:
name: update to latest imaginary commit on master branch
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Run imaginary-update
run: |
# Imaginary
imaginary_version="$(
git ls-remote https://github.com/h2non/imaginary master \
| cut -f1 \
| tail -1
)"
sed -i "s|^ENV IMAGINARY_HASH.*$|ENV IMAGINARY_HASH=$imaginary_version|" ./Containers/imaginary/Dockerfile
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
with:
commit-message: imaginary-update automated change
signoff: true
title: Imaginary update
body: Automated Imaginary container update
labels: dependencies, 3. to review
milestone: next
branch: imaginary-container-update


@ -1,37 +1,20 @@
name: Json Validator
on:
pull_request:
paths:
- '**.json'
push:
branches:
- main
paths:
- '**.json'
jobs:
json-validator:
name: Json Validator
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Validate Json
run: |
sudo apt-get update
sudo apt-get install python3-venv -y --no-install-recommends
python3 -m venv venv
. venv/bin/activate
pip3 install json-spec
if ! json validate --schema-file=php/containers-schema.json --document-file=php/containers.json; then
exit 1
fi
JSON_FILES="$(find ./community-containers -name '*.json')"
mapfile -t JSON_FILES <<< "$JSON_FILES"
for file in "${JSON_FILES[@]}"; do
json validate --schema-file=php/containers-schema.json --document-file="$file" 2>&1 | tee -a ./json-validator.log
done
if grep -q "document does not validate with schema.\|invalid JSONFile" ./json-validator.log; then
exit 1
fi
name: Json Validator
on:
pull_request:
push:
branches:
- main
jobs:
psalm:
name: Json Validator
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Validate Json
run: |
sudo apt install python3-pip --no-install-recommends
sudo pip3 install json-spec
json validate --schema-file=php/containers-schema.json --document-file=php/containers.json


@ -1,24 +0,0 @@
name: Lint Helm Charts
on:
workflow_dispatch:
pull_request:
paths:
- 'nextcloud-aio-helm-chart/**'
jobs:
lint-helm:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- name: Install Helm
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4
with:
version: v3.11.1
- name: Lint charts
run: helm lint nextcloud-aio-helm-chart


@ -1,67 +1,48 @@
# This workflow is provided via the organization template repository
#
# https://github.com/nextcloud/.github
# https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization
#
# SPDX-FileCopyrightText: 2021-2024 Nextcloud GmbH and Nextcloud contributors
# SPDX-License-Identifier: MIT
name: Lint php
on:
pull_request:
paths:
- 'php/**'
push:
branches:
- main
paths:
- 'php/**'
permissions:
contents: read
concurrency:
group: lint-php-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
php-lint:
runs-on: ubuntu-latest
strategy:
matrix:
php-versions: [ "8.4" ]
name: php-lint
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Set up php ${{ matrix.php-versions }}
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2.36.0
with:
php-version: ${{ matrix.php-versions }}
coverage: none
ini-file: development
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Lint
run: cd php && composer run lint
summary:
permissions:
contents: none
runs-on: ubuntu-latest-low
needs: php-lint
if: always()
name: php-lint-summary
steps:
- name: Summary status
run: if ${{ needs.php-lint.result != 'success' && needs.php-lint.result != 'skipped' }}; then exit 1; fi
# This workflow is provided via the organization template repository
#
# https://github.com/nextcloud/.github
# https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization
name: Lint
on:
pull_request:
push:
branches:
- main
- master
- stable*
jobs:
php-lint:
runs-on: ubuntu-latest
strategy:
matrix:
php-versions: ["8.0"]
name: php-lint
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up php ${{ matrix.php-versions }}
uses: shivammathur/setup-php@v2
with:
php-version: ${{ matrix.php-versions }}
coverage: none
- name: Lint
run: cd php && composer run lint
summary:
runs-on: ubuntu-latest
needs: php-lint
if: always()
name: php-lint-summary
steps:
- name: Summary status
run: if ${{ needs.php-lint.result != 'success' && needs.php-lint.result != 'skipped' }}; then exit 1; fi


@ -1,42 +0,0 @@
# This workflow is provided via the organization template repository
#
# https://github.com/nextcloud/.github
# https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization
#
# SPDX-FileCopyrightText: 2021-2024 Nextcloud GmbH and Nextcloud contributors
# SPDX-License-Identifier: MIT
name: Lint YAML
on:
pull_request:
paths:
- '**.yml'
permissions:
contents: read
jobs:
yaml-lint:
runs-on: ubuntu-latest
name: yaml
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.1
with:
persist-credentials: false
- name: GitHub action templates lint
uses: ibiqlik/action-yamllint@2576378a8e339169678f9939646ee3ee325e845c # v3.1.1
with:
file_or_dir: .github/workflows
config_data: |
line-length: warning
- name: Install the latest version of uv
uses: astral-sh/setup-uv@61cb8a9741eeb8a550a1b8544337180c0fc8476b # v7.2.0
- name: Check GitHub actions
run: uvx zizmor --min-severity medium .github/workflows/*.yml


@ -14,7 +14,7 @@ jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/lock-threads@7266a7ce5c1df01b1c6db85bf8cd86c737dadbe7 # v5
- uses: dessant/lock-threads@v3
with:
issue-inactive-days: '14'
process-only: 'issues'


@ -2,7 +2,6 @@
name: nextcloud-update
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
@ -11,7 +10,7 @@ jobs:
name: Run nextcloud-update script
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@v3
- name: Run nextcloud-update script
run: |
# Inspired by https://github.com/nextcloud/docker/blob/master/update.sh
@ -25,7 +24,7 @@ jobs:
| sort -V \
| tail -1
)"
sed -i "s|\(pecl install[^;]*APCu-\)[0-9.]*|\1$apcu_version|" ./Containers/nextcloud/Dockerfile
sed -i "s|pecl install APCu.*\;|pecl install APCu-$apcu_version\;|" ./Containers/nextcloud/Dockerfile
# Memcached
memcached_version="$(
@ -36,7 +35,7 @@ jobs:
| sort -V \
| tail -1
)"
sed -i "s|\(pecl install[^;]*memcached-\)[0-9.]*|\1$memcached_version|" ./Containers/nextcloud/Dockerfile
sed -i "s|pecl install memcached.*\;|pecl install memcached-$memcached_version\;|" ./Containers/nextcloud/Dockerfile
# Redis
redis_version="$(
@ -47,44 +46,31 @@ jobs:
| sort -V \
| tail -1
)"
sed -i "s|\(pecl install[^;]*redis-\)[0-9.]*|\1$redis_version|" ./Containers/nextcloud/Dockerfile
sed -i "s|pecl install redis.*\;|pecl install redis-$redis_version\;|" ./Containers/nextcloud/Dockerfile
# Imagick
imagick_version="$(
git ls-remote --tags https://github.com/imagick/imagick.git \
git ls-remote --tags https://github.com/mkoppanen/imagick.git \
| cut -d/ -f3 \
| grep -viE '[a-z]' \
| tr -d '^{}' \
| sort -V \
| tail -1
)"
sed -i "s|\(pecl install[^;]*imagick-\)[0-9.]*|\1$imagick_version|" ./Containers/nextcloud/Dockerfile
# Igbinary
igbinary_version="$(
git ls-remote --tags https://github.com/igbinary/igbinary.git \
| cut -d/ -f3 \
| grep -viE '[a-z]' \
| tr -d '^{}' \
| sort -V \
| tail -1
)"
sed -i "s|\(pecl install[^;]*igbinary-\)[0-9.]*|\1$igbinary_version|" ./Containers/nextcloud/Dockerfile
sed -i "s|pecl install imagick.*\;|pecl install imagick-$imagick_version\;|" ./Containers/nextcloud/Dockerfile
# Nextcloud
NC_MAJOR="$(grep "ENV NEXTCLOUD_VERSION" ./Containers/nextcloud/Dockerfile | grep -oP '[23][0-9]')"
NCVERSION=$(curl -s -m 900 https://download.nextcloud.com/server/releases/ | sed --silent 's/.*href="nextcloud-\([^"]\+\).zip.asc".*/\1/p' | grep "$NC_MAJOR" | sort --version-sort | tail -1)
if [ -n "$NCVERSION" ]; then
sed -i "s|^ENV NEXTCLOUD_VERSION.*|ENV NEXTCLOUD_VERSION=$NCVERSION|" ./Containers/nextcloud/Dockerfile
fi
sed -i "s|^ENV NEXTCLOUD_VERSION.*|ENV NEXTCLOUD_VERSION $NCVERSION|" ./Containers/nextcloud/Dockerfile
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
uses: peter-evans/create-pull-request@v4
with:
commit-message: nextcloud-update automated change
signoff: true
title: Nextcloud dependency update
title: Nextcloud update
body: Automated Nextcloud container update
labels: dependencies, 3. to review
labels: dependencies, enhancement
milestone: next
branch: nextcloud-container-update


@ -1,35 +0,0 @@
name: PHP Deprecation Detector
# See https://github.com/wapmorgan/PhpDeprecationDetector
on:
pull_request:
paths:
- 'php/**'
push:
branches:
- main
paths:
- 'php/**'
jobs:
phpdd:
name: PHP Deprecation Detector
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Set up php
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2
with:
php-version: 8.4
extensions: apcu
coverage: none
- name: Run script
run: |
set -x
cd php
composer install
composer run php-deprecation-detector | tee -i ./phpdd.log
if grep "Total issues:" ./phpdd.log; then
exit 1
fi


@ -1,123 +0,0 @@
name: Playwright Tests on push
on:
pull_request:
paths:
- 'php/**'
push:
branches:
- main
paths:
- 'php/**'
concurrency:
group: playwright-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
BASE_URL: https://localhost:8080
jobs:
test:
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6
with:
node-version: lts/*
- name: Install dependencies
run: cd php/tests && npm ci
- name: Install Playwright Browsers
run: cd php/tests && npx playwright install --with-deps chromium
- name: Set up php 8.4
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2.36.0
with:
extensions: apcu
php-version: 8.4
coverage: none
ini-file: development
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Adjust some things and fix permissions
run: |
cd php
rm -r ./data
rm -r ./session
composer install --no-dev
composer clear-cache
sudo chmod 777 -R ./
- name: Start fresh development server
run: |
docker rm --force nextcloud-aio-{mastercontainer,apache,notify-push,nextcloud,redis,database,domaincheck,whiteboard,imaginary,talk,collabora,borgbackup} || true
docker volume rm nextcloud_aio_{mastercontainer,apache,database,database_dump,nextcloud,nextcloud_data,redis,backup_cache,elasticsearch} || true
docker pull ghcr.io/nextcloud-releases/all-in-one:develop
docker run \
-d \
--init \
--name nextcloud-aio-mastercontainer \
--restart always \
--publish 8080:8080 \
--volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
--volume ./php:/var/www/docker-aio/php \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
--env SKIP_DOMAIN_VALIDATION=true \
--env APACHE_PORT=11000 \
ghcr.io/nextcloud-releases/all-in-one:develop
echo Waiting for 10 seconds for the development container to start ...
sleep 10
- name: Run Playwright tests for initial setup
run: |
cd php/tests
export DEBUG=pw:api
if ! npx playwright test tests/initial-setup.spec.js; then
docker logs nextcloud-aio-mastercontainer
docker logs nextcloud-aio-borgbackup
exit 1
fi
- name: Start fresh development server
run: |
docker rm --force nextcloud-aio-{mastercontainer,apache,notify-push,nextcloud,redis,database,domaincheck,whiteboard,imaginary,talk,collabora,borgbackup} || true
docker volume rm nextcloud_aio_{mastercontainer,apache,database,database_dump,nextcloud,nextcloud_data,redis,backup_cache,elasticsearch} || true
docker run \
-d \
--init \
--name nextcloud-aio-mastercontainer \
--restart always \
--publish 8080:8080 \
--volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
--volume ./php:/var/www/docker-aio/php \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
--env SKIP_DOMAIN_VALIDATION=false \
--env APACHE_PORT=11000 \
ghcr.io/nextcloud-releases/all-in-one:develop
echo Waiting for 10 seconds for the development container to start ...
sleep 10
- name: Run Playwright tests for backup restore
run: |
cd php/tests
export DEBUG=pw:api
if ! npx playwright test tests/restore-instance.spec.js; then
docker logs nextcloud-aio-mastercontainer
docker logs nextcloud-aio-borgbackup
exit 1
fi
- uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
if: ${{ !cancelled() }}
with:
name: playwright-report
path: php/tests/playwright-report/
retention-days: 14
overwrite: true


@ -1,91 +0,0 @@
name: Playwright Tests
on:
workflow_dispatch:
env:
BASE_URL: https://localhost:8080
jobs:
test:
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6
with:
node-version: lts/*
- name: Install dependencies
run: cd php/tests && npm ci
- name: Install Playwright Browsers
run: cd php/tests && npx playwright install --with-deps chromium
- name: Start fresh development server
run: |
docker rm --force nextcloud-aio-{mastercontainer,apache,notify-push,nextcloud,redis,database,domaincheck,whiteboard,imaginary,talk,collabora,borgbackup} || true
docker volume rm nextcloud_aio_{mastercontainer,apache,database,database_dump,nextcloud,nextcloud_data,redis,backup_cache,elasticsearch} || true
docker pull ghcr.io/nextcloud-releases/all-in-one:develop
docker run \
-d \
--init \
--name nextcloud-aio-mastercontainer \
--restart always \
--publish 8080:8080 \
--volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
--env SKIP_DOMAIN_VALIDATION=true \
--env APACHE_PORT=11000 \
ghcr.io/nextcloud-releases/all-in-one:develop
echo Waiting for 10 seconds for the development container to start ...
sleep 10
- name: Run Playwright tests for initial setup
run: |
cd php/tests
export DEBUG=pw:api
if ! npx playwright test tests/initial-setup.spec.js; then
docker logs nextcloud-aio-mastercontainer
docker logs nextcloud-aio-borgbackup
exit 1
fi
- name: Start fresh development server
run: |
docker rm --force nextcloud-aio-{mastercontainer,apache,notify-push,nextcloud,redis,database,domaincheck,whiteboard,imaginary,talk,collabora,borgbackup} || true
docker volume rm nextcloud_aio_{mastercontainer,apache,database,database_dump,nextcloud,nextcloud_data,redis,backup_cache,elasticsearch} || true
docker run \
-d \
--init \
--name nextcloud-aio-mastercontainer \
--restart always \
--publish 8080:8080 \
--volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
--env SKIP_DOMAIN_VALIDATION=false \
--env APACHE_PORT=11000 \
ghcr.io/nextcloud-releases/all-in-one:develop
echo Waiting for 10 seconds for the development container to start ...
sleep 10
- name: Run Playwright tests for backup restore
run: |
cd php/tests
export DEBUG=pw:api
if ! npx playwright test tests/restore-instance.spec.js; then
docker logs nextcloud-aio-mastercontainer
docker logs nextcloud-aio-borgbackup
exit 1
fi
- uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
if: ${{ !cancelled() }}
with:
name: playwright-report
path: php/tests/playwright-report/
retention-days: 14
overwrite: true

.github/workflows/psalm-analysis.yml vendored Normal file (28 changes)

@ -0,0 +1,28 @@
name: Psalm Analysis
on:
pull_request:
push:
branches:
- main
jobs:
psalm:
name: Psalm
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up php8.0
uses: shivammathur/setup-php@v2
with:
php-version: 8.0
extensions: apcu
coverage: none
- name: Run script
run: |
set -x
cd php
composer global require vimeo/psalm --prefer-dist --no-progress --dev
composer install
composer run psalm

.github/workflows/psalm-security.yml vendored Normal file (25 changes)

@ -0,0 +1,25 @@
name: Psalm Security Analysis
on:
push:
branches:
- main
jobs:
psalm:
name: Psalm
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Psalm
uses: docker://ghcr.io/nextcloud/all-in-one-psalm
with:
relative_dir: php
security_analysis: true
composer_ignore_platform_reqs: false
report_file: results.sarif
- name: Upload Security Analysis results to GitHub
uses: github/codeql-action/upload-sarif@v2
with:
sarif_file: php/results.sarif


@ -10,12 +10,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@v3
- name: Set up php
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2
- name: Set up php8.0
uses: shivammathur/setup-php@v2
with:
php-version: 8.4
php-version: 8.0
extensions: apcu
coverage: none
@ -23,14 +23,15 @@ jobs:
run: |
set -x
cd php
composer global require vimeo/psalm --prefer-dist --no-progress --dev
composer install
composer run psalm:update-baseline
composer run psalm -- --monochrome --no-progress --output-format=text --update-baseline
git clean -f lib/composer
git checkout composer.json composer.lock lib/composer
continue-on-error: true
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
uses: peter-evans/create-pull-request@v4
with:
token: ${{ secrets.COMMAND_BOT_PAT }}
commit-message: Update psalm baseline
@ -38,9 +39,10 @@ jobs:
author: nextcloud-command <nextcloud-command@users.noreply.github.com>
signoff: true
branch: automated/noid/psalm-baseline-update
# Make sure we can open multiple PRs
branch-suffix: timestamp
title: '[Automated] Update psalm-baseline.xml'
milestone: next
body: |
Auto-generated update psalm-baseline.xml with fixed psalm warnings
labels: |
3. to review, dependencies
3. to review


@ -1,56 +0,0 @@
# This workflow is provided via the organization template repository
#
# https://github.com/nextcloud/.github
# https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization
#
# SPDX-FileCopyrightText: 2022-2024 Nextcloud GmbH and Nextcloud contributors
# SPDX-License-Identifier: MIT
name: Static analysis
on:
pull_request:
paths:
- 'php/**'
push:
branches:
- main
paths:
- 'php/**'
concurrency:
group: psalm-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
permissions:
contents: read
jobs:
static-analysis:
runs-on: ubuntu-latest
name: static-psalm-analysis
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Set up php
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2.36.0
with:
php-version: 8.4
extensions: apcu
coverage: none
ini-file: development
# Temporary workaround for missing pcntl_* in PHP 8.3
ini-values: disable_functions=
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Install dependencies and run psalm
run: |
set -x
cd php
composer install
composer run psalm


@ -2,22 +2,18 @@ name: Shellcheck
on:
pull_request:
paths:
- '**.sh'
push:
branches:
- main
paths:
- '**.sh'
jobs:
shellcheck:
name: Check Shell
name: Github Actions
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@v3
- name: Run Shellcheck
uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # v2.0.0
uses: ludeeus/action-shellcheck@master
with:
check_together: 'yes'
env:

.github/workflows/spellcheck.yml vendored Normal file (23 changes)

@ -0,0 +1,23 @@
name: 'Spellcheck'
on:
pull_request:
push:
branches:
- main
jobs:
spellcheck:
name: Check spelling
runs-on: ubuntu-latest
steps:
- name: spelling or typos
uses: actions/checkout@v3
- name: fix permission for reviewdog
run: sudo chown -R root:root $GITHUB_WORKSPACE
- name: misspell
uses: reviewdog/action-misspell@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
locale: "US"
fail_on_error: true


@ -1,56 +0,0 @@
name: talk-update
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
jobs:
talk-update:
name: update talk
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Run talk-container-update
run: |
# Recording
recording_version="$(
git ls-remote https://github.com/nextcloud/nextcloud-talk-recording v* \
| cut -d/ -f3 \
| sort -V \
| grep -E "^v[0-9\.]+$" \
| tail -1
)"
sed -i "s|^ENV RECORDING_VERSION.*$|ENV RECORDING_VERSION=$recording_version|" ./Containers/talk-recording/Dockerfile
curl -L "https://raw.githubusercontent.com/nextcloud/nextcloud-talk-recording/$recording_version/server.conf.in" -o Containers/talk-recording/recording.conf
# Signaling
signaling_version="$(
git ls-remote https://github.com/strukturag/nextcloud-spreed-signaling v*.*.* \
| cut -d/ -f3 \
| sort -V \
| grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$" \
| tail -1
)"
curl -L "https://raw.githubusercontent.com/strukturag/nextcloud-spreed-signaling/$signaling_version/server.conf.in" -o Containers/talk/server.conf.in
# Janus
janus_version="$(
git ls-remote https://github.com/meetecho/janus-gateway v1.*.* \
| cut -d/ -f3 \
| sort -V \
| grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$" \
| tail -1
)"
sed -i "s|^ARG JANUS_VERSION=.*$|ARG JANUS_VERSION=$janus_version|" ./Containers/talk/Dockerfile
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
with:
commit-message: talk-update automated change
signoff: true
title: talk container update
body: Automated talk container update
labels: dependencies, 3. to review
milestone: next
branch: talk-container-update


@ -1,40 +0,0 @@
name: Twig Lint
on:
pull_request:
paths:
- '**.twig'
push:
branches:
- main
paths:
- '**.twig'
permissions:
contents: read
concurrency:
group: lint-twig-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
twig-lint:
runs-on: ubuntu-latest
name: twig-lint
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Set up php ${{ matrix.php-versions }}
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2
with:
php-version: 8.4
extensions: apcu
coverage: none
- name: twig lint
run: |
cd php
composer install
composer run lint:twig


@ -1,11 +0,0 @@
name: Update Copyright
on:
workflow_dispatch:
jobs:
update-copyright:
name: update copyright
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2


@ -1,35 +0,0 @@
name: Update Helm Chart
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
jobs:
update-helm:
name: update helm chart
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: update helm chart
run: |
set -x
GHCR_TOKEN="$(curl https://ghcr.io/token?scope=repository:nextcloud-releases/nce-php-fpm-mgmt:pull | jq '.token' | sed 's|"||g')"
DOCKER_TAG="$(curl -H "Authorization: Bearer ${GHCR_TOKEN}" -L -s 'https://ghcr.io/v2/nextcloud-releases/all-in-one/tags/list?page_size=1024' | jq '.tags' | sed 's|"||g;s|[[:space:]]||g;s|,||g' | grep '^20[0-9_]\+' | grep -v latest | sort -r | head -1)"
export DOCKER_TAG
set +x
if [ -n "$DOCKER_TAG" ] && ! grep -q "aio-nextcloud:$DOCKER_TAG" ./nextcloud-aio-helm-chart/templates/nextcloud-aio-nextcloud-deployment.yaml; then
sudo bash nextcloud-aio-helm-chart/update-helm.sh "$DOCKER_TAG"
fi
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
with:
commit-message: Helm Chart updates
signoff: true
title: Helm Chart updates
body: Automated Helm Chart updates for the yaml files. It can be merged if it looks good at any time which will automatically trigger a new release of the helm chart.
labels: dependencies, 3. to review
milestone: next
branch: aio-helm-update
token: ${{ secrets.GITHUB_TOKEN }}


@ -6,23 +6,22 @@ on:
- cron: '00 12 * * *'
jobs:
update-yaml:
psalm:
name: update yaml files
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@v3
- name: update yaml files
run: |
sudo bash manual-install/update-yaml.sh
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
uses: peter-evans/create-pull-request@v4
with:
commit-message: Yaml updates
signoff: true
title: Yaml updates
body: Automated yaml updates for the docker-compose files. Should only be merged shortly before the next latest release.
labels: dependencies, 3. to review
labels: dependencies
milestone: next
branch: aio-yaml-update
token: ${{ secrets.GITHUB_TOKEN }}


@ -1,37 +0,0 @@
name: watchtower-update
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
jobs:
watchtower-update:
name: update watchtower
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Run watchtower-container-update
run: |
# Watchtower
watchtower_version="$(
git ls-remote https://github.com/nicholas-fedor/watchtower v* \
| cut -d/ -f3 \
| sort -V \
| grep -E "^v[0-9\.]+$" \
| tail -1
)"
watchtower_commit_hash="$(git ls-remote https://github.com/nicholas-fedor/watchtower $watchtower_version | sed 's/refs.*//')"
sed -i "s|^ENV WATCHTOWER_COMMIT_HASH.*$|ENV WATCHTOWER_COMMIT_HASH=$watchtower_commit_hash|" ./Containers/watchtower/Dockerfile
sed -i "s|\$WATCHTOWER_COMMIT_HASH.*$|\$WATCHTOWER_COMMIT_HASH # $watchtower_version|" ./Containers/watchtower/Dockerfile
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
with:
commit-message: watchtower-update automated change
signoff: true
title: watchtower container update
body: Automated watchtower container update
labels: dependencies, 3. to review
milestone: next
branch: watchtower-container-update

.gitignore vendored (15 changes)

@ -1,15 +1,8 @@
.DS_Store
.idea/
*.iml
/php/data/*
/php/session/*
!/php/data/.gitkeep
!/php/session/.gitkeep
/php/data/containers.json
/php/data/configuration.json
/php/data/backupsecret.json
/php/vendor
/manual-install/*.conf
!/manual-install/sample.conf
/manual-install/docker-compose.yml
/manual-install/compose.yaml
/manual-install/.env
/manual-install/docker-compose.yml

CODEOWNERS Normal file (1 change)

@ -0,0 +1 @@
* @szaimen @juliushaertl


@ -1,13 +0,0 @@
<!--
- SPDX-FileCopyrightText: 2018 Nextcloud GmbH and Nextcloud contributors
- SPDX-License-Identifier: AGPL-3.0-or-later
-->
In the Nextcloud community, participants from all over the world come together to create Free Software for a free internet. This is made possible by the support, hard work and enthusiasm of thousands of people, including those who create and use Nextcloud software.
Our code of conduct offers some guidance to ensure Nextcloud participants can cooperate effectively in a positive and inspiring atmosphere, and to explain how together we can strengthen and support each other.
The Code of Conduct is shared by all contributors and users who engage with the Nextcloud team and its community services. It presents a summary of the shared values and “common sense” thinking in our community.
You can find our full code of conduct on our website: https://nextcloud.com/code-of-conduct/
Please, keep our CoC in mind when you contribute! That way, everyone can be a part of our community in a productive, positive, creative and fun way.


@ -1,7 +0,0 @@
# syntax=docker/dockerfile:latest
FROM alpine:3.23.3
RUN set -ex; \
apk upgrade --no-cache -a
LABEL org.label-schema.vendor="Nextcloud"


@ -4,67 +4,63 @@
storage file_system {
root /mnt/data/caddy
}
servers {
# trusted_proxies placeholder
}
log {
level ERROR
}
}
https://{$ADDITIONAL_TRUSTED_DOMAIN}:443,
http://{$APACHE_HOST}:23973, # For Collabora callback and WOPI requests, see containers.json
{$PROTOCOL}://{$NC_DOMAIN}:{$APACHE_PORT} {
header -Server
header -X-Powered-By
# Collabora
route /browser/* {
reverse_proxy {$COLLABORA_HOST}:9980
}
route /hosting/* {
reverse_proxy {$COLLABORA_HOST}:9980
}
route /cool/* {
reverse_proxy {$COLLABORA_HOST}:9980
}
# Notify Push
route /push/* {
uri strip_prefix /push
reverse_proxy {$NOTIFY_PUSH_HOST}:7867
}
# Onlyoffice
route /onlyoffice/* {
uri strip_prefix /onlyoffice
reverse_proxy {$ONLYOFFICE_HOST}:80 {
header_up X-Forwarded-Host {http.request.hostport}/onlyoffice
header_up X-Forwarded-Proto https
reverse_proxy {$NEXTCLOUD_HOST}:7867 {
# trusted_proxies placeholder
}
}
# Talk
route /standalone-signaling/* {
uri strip_prefix /standalone-signaling
reverse_proxy {$TALK_HOST}:8081
reverse_proxy {$TALK_HOST}:8081 {
# trusted_proxies placeholder
}
}
# Whiteboard
route /whiteboard/* {
uri strip_prefix /whiteboard
reverse_proxy {$WHITEBOARD_HOST}:3002
# Collabora
route /browser/* {
reverse_proxy {$COLLABORA_HOST}:9980 {
# trusted_proxies placeholder
}
}
route /hosting/* {
reverse_proxy {$COLLABORA_HOST}:9980 {
# trusted_proxies placeholder
}
}
route /cool/* {
reverse_proxy {$COLLABORA_HOST}:9980 {
# trusted_proxies placeholder
}
}
# Onlyoffice
route /onlyoffice/* {
uri strip_prefix /onlyoffice
reverse_proxy {$ONLYOFFICE_HOST}:80 {
header_up X-Forwarded-Host {http.request.host}/onlyoffice
header_up X-Forwarded-Proto https
# trusted_proxies placeholder
}
}
# Nextcloud
route {
rewrite /.well-known/carddav /remote.php/dav
rewrite /.well-known/caldav /remote.php/dav
header Strict-Transport-Security max-age=31536000;
reverse_proxy 127.0.0.1:8000
reverse_proxy localhost:8000 {
# See https://github.com/nextcloud/all-in-one/issues/828
# trusted_proxies placeholder
}
}
redir /.well-known/carddav /remote.php/dav/ 301
redir /.well-known/caldav /remote.php/dav/ 301
# TLS options
tls {


@ -1,92 +1,80 @@
# syntax=docker/dockerfile:latest
FROM caddy:2.10.2-alpine AS caddy
# Caddy is a requirement
FROM caddy:2.5.2-alpine as caddy
# From https://github.com/docker-library/httpd/blob/master/2.4/alpine/Dockerfile
FROM httpd:2.4.66-alpine3.23
FROM debian:bullseye-20220822-slim
COPY --from=caddy /usr/bin/caddy /usr/bin/caddy
COPY --chown=33:33 Caddyfile /Caddyfile
COPY --chmod=664 nextcloud.conf /usr/local/apache2/conf/nextcloud.conf
COPY --chmod=664 supervisord.conf /supervisord.conf
COPY --chmod=775 start.sh /start.sh
COPY --chmod=775 healthcheck.sh /healthcheck.sh
RUN mkdir -p /mnt/data; \
chown www-data:www-data /mnt/data;
VOLUME /mnt/data
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache shadow; \
groupmod -g 33 www-data; \
usermod -u 33 -g 33 www-data; \
apk del --no-cache shadow; \
\
mkdir -p /mnt/data; \
chown -R www-data:www-data /mnt/data; \
chown -R 777 /tmp; \
\
apk add --no-cache \
bash \
apt-get update; \
apt-get install -y --no-install-recommends \
apache2 \
supervisor \
tzdata \
wget \
ca-certificates \
openssl \
bind-tools \
netcat-openbsd; \
\
sed -i \
-e '/^Listen /d' \
-e 's/^#\(LoadModule .*mod_rewrite.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_headers.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_proxy.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_proxy_fcgi.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_setenvif.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_env.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_mime.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_dir.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_authz_core.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_alias.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_mpm_event.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_brotli.so\)/\1/' \
-e 's/\(LoadModule .*mod_mpm_worker.so\)/#\1/' \
-e 's/\(LoadModule .*mod_mpm_prefork.so\)/#\1/' \
-e 's/\(ScriptAlias \)/#\1/' \
/usr/local/apache2/conf/httpd.conf; \
echo "Include conf/nextcloud.conf" | tee -a /usr/local/apache2/conf/httpd.conf; \
echo "ServerName localhost" | tee -a /usr/local/apache2/conf/httpd.conf; \
# Sync this with max db connections and pm.max_children
# We don't actually expect so many workers but don't want to limit it artificially because people will report issues otherwise.
sed -i 's|MaxRequestWorkers.*|MaxRequestWorkers 5000|' /usr/local/apache2/conf/extra/httpd-mpm.conf; \
grep -q '<IfModule mpm_event_module>' /usr/local/apache2/conf/extra/httpd-mpm.conf; \
# ServerLimit needs to be set to MaxRequestWorkers divided by ThreadsPerChild which is set to 25 by default
sed -i '/<IfModule mpm_event_module>/a\ \ \ \ ServerLimit 200' /usr/local/apache2/conf/extra/httpd-mpm.conf; \
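# i.e. MaxRequestWorkers 5000 divided by the default ThreadsPerChild of 25 gives the ServerLimit of 200 appended above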
\
rm -rf /usr/local/apache2/conf/original /var/www; \
mkdir -p /var/www; \
chown -R www-data:www-data /var/www; \
\
mkdir /var/log/supervisord; \
netcat \
dpkg-dev \
curl \
; \
rm -rf /var/lib/apt/lists/*
COPY --from=caddy /usr/bin/caddy /usr/bin/
RUN chmod +x /usr/bin/caddy
RUN a2enmod rewrite \
headers \
proxy \
proxy_fcgi \
setenvif \
env \
mime \
dir \
authz_core \
alias
COPY nextcloud.conf /etc/apache2/sites-available/
RUN rm /etc/apache2/ports.conf; \
sed -s -i -e "s/Include ports.conf//" /etc/apache2/apache2.conf; \
sed -i "/^Listen /d" /etc/apache2/apache2.conf
RUN set -ex; \
a2dissite 000-default && \
a2dissite default-ssl && \
a2ensite nextcloud.conf && \
rm -rf /var/www/html/* && \
chown www-data:www-data -R /var/log/apache2; \
mkdir -p /var/run/apache2; \
chown -R www-data:www-data /var/run/apache2; \
chown -R www-data:www-data /var/www;
RUN mkdir /var/log/supervisord; \
mkdir /var/run/supervisord; \
chown www-data:www-data /var/run/supervisord; \
chown www-data:www-data /var/log/supervisord; \
chmod 777 /var/run/supervisord; \
chmod 777 /var/log/supervisord; \
\
chown -R www-data:www-data /usr/local/apache2; \
chmod +r -R /usr/local/apache2; \
mkdir -p /usr/local/apache2/logs; \
chmod 777 -R /home/www-data; \
chmod 777 -R /usr/local/apache2/logs; \
rm -rf /usr/local/apache2/cgi-bin/; \
\
echo "root:$(openssl rand -base64 12)" | chpasswd
chown www-data:www-data /var/log/supervisord;
USER 33
COPY Caddyfile /
ENTRYPOINT ["/start.sh"]
COPY start.sh /usr/bin/
COPY healthcheck.sh /usr/bin/
COPY supervisord.conf /
RUN chmod +x /usr/bin/start.sh; \
chmod +x /usr/bin/healthcheck.sh; \
chmod +r /supervisord.conf; \
chown www-data:www-data /Caddyfile; \
chmod +r -R /etc/apache2
# Give root a random password
RUN echo "root:$(openssl rand -base64 12)" | chpasswd
USER www-data
ENTRYPOINT ["start.sh"]
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]
HEALTHCHECK CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
HEALTHCHECK CMD healthcheck.sh


@ -1,5 +1,8 @@
#!/bin/bash
nc -z "$NEXTCLOUD_HOST" 9000 || exit 0
nc -z 127.0.0.1 8000 || exit 1
nc -z 127.0.0.1 "$APACHE_PORT" || exit 1
curl -skfI localhost:8000 || exit 1
if [ "$APACHE_PORT" != '443' ]; then
curl -skfI localhost:"$APACHE_PORT" || exit 1
else
curl -skfI https://"$NC_DOMAIN":"$APACHE_PORT" || exit 1
fi


@ -1,28 +1,9 @@
Listen 8000
<VirtualHost *:8000>
ServerName localhost
# Add error log
CustomLog /proc/self/fd/1 proxy
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy
ErrorLog /proc/self/fd/2
ErrorLogFormat "[%t] [%l] [%E] [client: %{X-Forwarded-For}i] [%M] [%{User-Agent}i]"
LogLevel warn
# PHP match
<FilesMatch "\.php$">
SetHandler "proxy:fcgi://${NEXTCLOUD_HOST}:9000"
SetHandler "proxy:fcgi://nextcloud-aio-nextcloud:9000"
</FilesMatch>
<Proxy "fcgi://${NEXTCLOUD_HOST}:9000" flushpackets=on>
</Proxy>
# Enable Brotli compression for js, css and svg files - other plain files are compressed by Nextcloud by default
<IfModule mod_brotli.c>
AddOutputFilterByType BROTLI_COMPRESS text/javascript application/javascript application/x-javascript text/css image/svg+xml
BrotliCompressionQuality 0
</IfModule>
# Nextcloud dir
DocumentRoot /var/www/html/
<Directory /var/www/html/>
@ -40,15 +21,10 @@ Listen 8000
Require all denied
</Files>
# Fix zero file sizes
# See https://github.com/nextcloud/server/issues/3056#issuecomment-954209565
SetEnv proxy-sendcl 1
# See https://httpd.apache.org/docs/current/en/mod/core.html#limitrequestbody
LimitRequestBody ${APACHE_MAX_SIZE}
# See https://httpd.apache.org/docs/current/mod/core.html#timeout
Timeout ${APACHE_MAX_TIME}
# See https://httpd.apache.org/docs/current/mod/mod_proxy.html#proxytimeout
ProxyTimeout ${APACHE_MAX_TIME}
# See https://httpd.apache.org/docs/trunk/mod/core.html#traceenable
TraceEnable Off
LimitRequestBody 0
</VirtualHost>


@ -17,13 +17,6 @@ while ! nc -z "$NEXTCLOUD_HOST" 9000; do
sleep 5
done
# Get ipv4-address of Apache
# shellcheck disable=SC2153
IPv4_ADDRESS="$(dig "$APACHE_HOST" A +short +search | head -1)"
# Bring it into CIDR notation
# shellcheck disable=SC2001
IPv4_ADDRESS="$(echo "$IPv4_ADDRESS" | sed 's|[0-9]\+$|0/16|')"
if [ -z "$APACHE_PORT" ]; then
export APACHE_PORT="443"
fi
@ -42,36 +35,20 @@ if [ "$APACHE_PORT" != '443' ]; then
else
CADDYFILE="$(sed 's|auto_https.*|auto_https disable_redirects|' /Caddyfile)"
fi
echo "$CADDYFILE" > /tmp/Caddyfile
echo "$CADDYFILE" > /Caddyfile
# Change the trusted_proxies in case of reverse proxies
if [ "$APACHE_PORT" != '443' ]; then
# Here the 100.64.0.0/10 range gets added which is the CGNAT range used by Tailscale nodes
# See https://github.com/nextcloud/all-in-one/pull/6703 for reference
CADDYFILE="$(sed 's|# trusted_proxies placeholder|trusted_proxies static private_ranges 100.64.0.0/10|' /tmp/Caddyfile)"
CADDYFILE="$(sed 's|# trusted_proxies placeholder|trusted_proxies private_ranges|' /Caddyfile)"
else
CADDYFILE="$(sed "s|# trusted_proxies placeholder|trusted_proxies static $IPv4_ADDRESS|" /tmp/Caddyfile)"
CADDYFILE="$(sed 's|trusted_proxies private_ranges|# trusted_proxies placeholder|' /Caddyfile)"
fi
echo "$CADDYFILE" > /tmp/Caddyfile
# Remove additional domain if not given
if [ -z "$ADDITIONAL_TRUSTED_DOMAIN" ]; then
CADDYFILE="$(sed '/ADDITIONAL_TRUSTED_DOMAIN/d' /tmp/Caddyfile)"
fi
echo "$CADDYFILE" > /tmp/Caddyfile
# Fix the Caddyfile format
caddy fmt --overwrite /tmp/Caddyfile
echo "$CADDYFILE" > /Caddyfile
# Add caddy path
mkdir -p /mnt/data/caddy/
# Fix caddy startup
if [ -d "/mnt/data/caddy/locks" ]; then
rm -rf /mnt/data/caddy/locks/*
fi
# Fix apache startup
rm -f /var/run/apache2/apache2.pid
# Fix apache startup
rm -f /usr/local/apache2/logs/httpd.pid
exec "$@"
exec "$@"

View file

@ -1,23 +1,23 @@
[supervisord]
nodaemon=true
nodaemon=true
logfile=/var/log/supervisord/supervisord.log
pidfile=/var/run/supervisord/supervisord.pid
childlogdir=/var/log/supervisord/
logfile_maxbytes=50MB
logfile_backups=10
loglevel=error
[program:apache]
# Stdout logging is disabled as otherwise the logs would be spammed
stdout_logfile=NONE
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=apachectl -DFOREGROUND
[program:caddy]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=/usr/bin/caddy run --config /tmp/Caddyfile
[supervisord]
nodaemon=true
nodaemon=true
logfile=/var/log/supervisord/supervisord.log
pidfile=/var/run/supervisord/supervisord.pid
childlogdir=/var/log/supervisord/
logfile_maxbytes=50MB
logfile_backups=10
loglevel=error
[program:apache]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=apachectl -DFOREGROUND
[program:caddy]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=/usr/bin/caddy run -config /Caddyfile

View file

@ -1,29 +1,23 @@
# syntax=docker/dockerfile:latest
FROM alpine:3.23.3
FROM debian:bullseye-20220822-slim
RUN set -ex; \
\
apk upgrade --no-cache -a; \
apk add --no-cache \
util-linux-misc \
bash \
apt-get update; \
apt-get install -y --no-install-recommends \
borgbackup \
rsync \
fuse \
py3-llfuse \
python3-llfuse \
jq \
openssh-client
; \
rm -rf /var/lib/apt/lists/*
VOLUME /root
COPY --chmod=770 *.sh /
COPY borg_excludes /
COPY start.sh /usr/bin/
COPY backupscript.sh /
RUN chmod +x /usr/bin/start.sh; \
chmod +x /backupscript.sh
ENTRYPOINT ["/start.sh"]
# hadolint ignore=DL3002
USER root
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
ENV BORG_RETENTION_POLICY="--keep-within=7d --keep-weekly=4 --keep-monthly=6"
ENTRYPOINT ["start.sh"]

View file

@ -24,34 +24,22 @@ for directory in "${VOLUME_DIRS[@]}"; do
exit 1
fi
done
# Test if default volumes are there
DEFAULT_VOLUMES=(nextcloud_aio_apache nextcloud_aio_nextcloud nextcloud_aio_database nextcloud_aio_database_dump nextcloud_aio_elasticsearch nextcloud_aio_nextcloud_data nextcloud_aio_mastercontainer)
for volume in "${DEFAULT_VOLUMES[@]}"; do
if ! mountpoint -q "/nextcloud_aio_volumes/$volume"; then
echo "$volume is missing which is not intended."
exit 1
fi
done
# Check if target is mountpoint
if [ -z "$BORG_REMOTE_REPO" ] && ! mountpoint -q "$MOUNT_DIR"; then
echo "$MOUNT_DIR is not a mountpoint which is not allowed."
if ! mountpoint -q /mnt/borgbackup; then
echo "/mnt/borgbackup is not a mountpoint which is not allowed"
exit 1
fi
# Check if repo is uninitialized
if [ "$BORG_MODE" != backup ] && [ "$BORG_MODE" != test ] && ! borg info > /dev/null; then
if [ -n "$BORG_REMOTE_REPO" ]; then
echo "The repository is uninitialized or cannot connect to remote. Cannot perform check or restore."
else
echo "The repository is uninitialized. Cannot perform check or restore."
fi
# Check if target is empty
if [ "$BORG_MODE" != backup ] && [ "$BORG_MODE" != test ] && ! [ -f "$BORG_BACKUP_DIRECTORY/config" ]; then
echo "The repository is empty. cannot perform check or restore."
exit 1
fi
# Do not continue if this file exists (needed for simple external blocking)
if [ -z "$BORG_REMOTE_REPO" ] && [ -f "$BORG_BACKUP_DIRECTORY/aio-lockfile" ]; then
echo "Not continuing because aio-lockfile exists it seems like a script is externally running which is locking the backup archive."
if [ -f "$BORG_BACKUP_DIRECTORY/aio-lockfile" ]; then
echo "Not continuing because aio-lockfile exists - it seems like a script is externally running which is locking the backup archive."
echo "If this should not be the case, you can fix this by deleting the 'aio-lockfile' file from the backup archive directory."
exit 1
fi
@ -61,15 +49,6 @@ if [ "$BORG_MODE" = backup ] || [ "$BORG_MODE" = restore ]; then
touch "/nextcloud_aio_volumes/nextcloud_aio_database_dump/backup-is-running"
fi
if [ -n "$BORG_REMOTE_REPO" ] && ! [ -f "$BORGBACKUP_KEY" ]; then
echo "First run, creating borg ssh key"
ssh-keygen -f "$BORGBACKUP_KEY" -N ""
echo "You should configure the remote to accept this public key"
fi
if [ -n "$BORG_REMOTE_REPO" ] && [ -f "$BORGBACKUP_KEY.pub" ]; then
echo "Your public ssh key for borgbackup is: $(cat "$BORGBACKUP_KEY.pub")"
fi
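On the remote side, the printed public key would typically be added to authorized_keys; a hedged sketch to run on the backup host (user, repo path and the borg serve restriction are illustrative assumptions, not something this script configures):
# Run on the remote backup host, not inside the container:
echo 'command="borg serve --restrict-to-repo /backups/nextcloud-aio",restrict <contents of id_borg.pub>' >> ~/.ssh/authorized_keys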
# Do the backup
if [ "$BORG_MODE" = backup ]; then
@ -78,97 +57,62 @@ if [ "$BORG_MODE" = backup ]; then
echo "configuration.json not present. Cannot perform the backup!"
exit 1
elif ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud/config/config.php" ]; then
echo "config.php is missing. Cannot perform backup!"
echo "config.php is missing cannot perform backup"
exit 1
elif ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_database_dump/database-dump.sql" ]; then
echo "database-dump is missing. Cannot perform backup!"
echo "Please check the database container logs!"
exit 1
elif ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/.ocdata" ] && ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/.ncdata" ]; then
echo "The .ncdata or .ocdata file is missing in Nextcloud datadir which means it is invalid!"
echo "Is the drive where the datadir is located on still mounted?"
echo "database-dump is missing. cannot perform backup"
exit 1
fi
# Test that default volumes are not empty
for volume in "${DEFAULT_VOLUMES[@]}"; do
if [ -z "$(ls -A "/nextcloud_aio_volumes/$volume")" ] && [ "$volume" != "nextcloud_aio_elasticsearch" ]; then
echo "/nextcloud_aio_volumes/$volume is empty which should not happen!"
# Test that nothing is empty
for directory in "${VOLUME_DIRS[@]}"; do
if [ -z "$(ls -A "$directory")" ]; then
echo "$directory is empty which is not allowed."
exit 1
fi
done
if [ -f "/nextcloud_aio_volumes/nextcloud_aio_database_dump/export.failed" ]; then
echo "Database export failed the last time. Most likely was the export time not high enough."
echo "Cannot create a backup now."
echo "Reason is that the database export failed the last time."
echo "Most likely was the database container not correctly shut down via the AIO interface."
echo ""
echo "You might want to try the database export again manually by running the three commands:"
echo "sudo docker start nextcloud-aio-database"
echo "sleep 10"
echo "sudo docker stop nextcloud-aio-database -t 1800"
echo ""
echo "Afterwards try to create a backup again and it should hopefully work."
echo "If it should still fail, feel free to report this to https://github.com/nextcloud/all-in-one/issues and post the database container logs and the borgbackup container logs into the thread. Thanks!"
echo "Please report this to https://github.com/nextcloud/all-in-one/issues. Thanks!"
exit 1
fi
if [ -z "$BORG_REMOTE_REPO" ]; then
# Create backup folder
mkdir -p "$BORG_BACKUP_DIRECTORY"
fi
# Create backup folder
mkdir -p "$BORG_BACKUP_DIRECTORY"
# Initialize the repository if we can't get info from the target
if ! borg info > /dev/null; then
# Initialize the repository if the target is empty
if ! [ -f "$BORG_BACKUP_DIRECTORY/config" ]; then
# Don't initialize if already initialized
if [ -f "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config" ]; then
if [ -n "$BORG_REMOTE_REPO" ]; then
echo "Borg could not get info from the remote repo."
echo "This might be a failure to connect to the remote server. See the above borg info output for details."
else
echo "Borg could not get info from the targeted directory."
echo "This might happen if the targeted directory is located on an external drive and the drive not connected anymore. You should check this."
fi
echo "If you instead want to initialize a new backup repository, you may delete the 'borg.config' file that is stored in the mastercontainer volume manually, which will allow you to initialize a new borg repository in the chosen directory:"
echo "sudo docker exec nextcloud-aio-mastercontainer rm /mnt/docker-aio-config/data/borg.config"
echo "Cannot initialize a new repository as that was already done at least one time."
exit 1
fi
echo "Initializing repository..."
NEW_REPOSITORY=1
if ! borg init --debug --encryption=repokey-blake2; then
echo "initializing repository..."
if ! borg init --debug --encryption=repokey-blake2 "$BORG_BACKUP_DIRECTORY"; then
echo "Could not initialize borg repository."
rm -f "$BORG_BACKUP_DIRECTORY/config"
exit 1
fi
borg config "$BORG_BACKUP_DIRECTORY" additional_free_space 2G
if [ -z "$BORG_REMOTE_REPO" ]; then
# borg config only works for local repos; it's up to the remote to ensure the disk isn't full
borg config :: additional_free_space 2G
# Fix too large Borg cache
# https://borgbackup.readthedocs.io/en/stable/faq.html#the-borg-cache-eats-way-too-much-disk-space-what-can-i-do
BORG_ID="$(borg config "$BORG_BACKUP_DIRECTORY" id)"
rm -r "/root/.cache/borg/$BORG_ID/chunks.archive.d"
touch "/root/.cache/borg/$BORG_ID/chunks.archive.d"
# Fix too large Borg cache
# https://borgbackup.readthedocs.io/en/stable/faq.html#the-borg-cache-eats-way-too-much-disk-space-what-can-i-do
BORG_ID="$(borg config :: id)"
rm -r "/root/.cache/borg/$BORG_ID/chunks.archive.d"
touch "/root/.cache/borg/$BORG_ID/chunks.archive.d"
fi
if ! borg info > /dev/null; then
echo "Borg can't get info from the repo it created. Something is wrong."
# Make a backup from the borg config file
if ! [ -f "$BORG_BACKUP_DIRECTORY/config" ]; then
echo "The borg config file wasn't created. Something is wrong."
exit 1
fi
rm -f "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config"
if [ -n "$BORG_REMOTE_REPO" ]; then
# `borg config` does not support remote repos so instead create a dummy file and rely on the remote to avoid
# corruption of the config file (which contains the encryption key). We don't actually use the contents of
# this file anywhere, so a touch is all we need so we remember we already initialized the repo.
touch "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config"
else
# Make a backup from the borg config file
if ! cp "$BORG_BACKUP_DIRECTORY/config" "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config"; then
echo "Could not copy config file to second place. Cannot perform backup."
exit 1
fi
if ! cp "$BORG_BACKUP_DIRECTORY/config" "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config"; then
echo "Could not copy config file to second place. Cannot perform backup."
exit 1
fi
echo "Repository successfully initialized."
@ -180,47 +124,15 @@ if [ "$BORG_MODE" = backup ]; then
# Borg options
# auto,zstd compression seems to have the best ratio based on:
# https://forum.level1techs.com/t/optimal-compression-for-borg-backups/145870/6
BORG_OPTS=(-v --stats --compression "auto,zstd")
if [ "$NEW_REPOSITORY" = 1 ]; then
BORG_OPTS+=(--progress)
fi
# Exclude the nextcloud log and audit log for GDPR reasons
BORG_EXCLUDE=(--exclude "/nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/nextcloud.log*" --exclude "/nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/audit.log" --exclude "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/lost+found")
BORG_INCLUDE=()
# Exclude datadir if .noaiobackup file was found
# shellcheck disable=SC2144
if [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/.noaiobackup" ]; then
BORG_EXCLUDE+=(--exclude "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/")
BORG_INCLUDE+=(--pattern="+/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/.noaiobackup")
echo "⚠️⚠️⚠️ '.noaiobackup' file was found in Nextcloud's data directory. Excluding the data directory from backup!"
# Exclude preview folder if .noaiobackup file was found
elif [ -f /nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_*/preview/.noaiobackup ]; then
BORG_EXCLUDE+=(--exclude "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_*/preview/")
BORG_INCLUDE+=(--pattern="+/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_*/preview/.noaiobackup")
echo "⚠️⚠️⚠️ '.noaiobackup' file was found in the preview directory. Excluding the preview directory from backup!"
fi
# Make sure that there is always a borg.config file before creating a new backup
if ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config" ]; then
echo "Did not find borg.config file in the mastercontainer volume."
echo "Cannot create a backup as this is wrong."
exit 1
fi
BORG_OPTS=(--stats --progress --compression "auto,zstd" --exclude-caches --checkpoint-interval 86400)
# Create the backup
echo "Starting the backup..."
get_start_time
if ! borg create "${BORG_OPTS[@]}" "${BORG_INCLUDE[@]}" "${BORG_EXCLUDE[@]}" "::$CURRENT_DATE-nextcloud-aio" "/nextcloud_aio_volumes/" --exclude-from /borg_excludes; then
if ! borg create "${BORG_OPTS[@]}" "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-nextcloud-aio" "/nextcloud_aio_volumes/"; then
echo "Deleting the failed backup archive..."
borg delete --stats "::$CURRENT_DATE-nextcloud-aio"
borg delete --stats --progress "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-nextcloud-aio"
echo "Backup failed!"
echo "You might want to check the backup integrity via the AIO interface."
if [ "$NEW_REPOSITORY" = 1 ]; then
echo "Deleting borg.config file so that you can choose a different location for the backup."
rm "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config"
fi
exit 1
fi
@ -228,23 +140,15 @@ if [ "$BORG_MODE" = backup ]; then
rm -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/skip.update"
# Prune options
read -ra BORG_PRUNE_OPTS <<< "$BORG_RETENTION_POLICY"
echo "BORG_PRUNE_OPTS are ${BORG_PRUNE_OPTS[*]}"
BORG_PRUNE_OPTS=(--stats --progress --keep-within=7d --keep-weekly=4 --keep-monthly=6 "$BORG_BACKUP_DIRECTORY")
# Prune archives
echo "Pruning the archives..."
if ! borg prune --stats --glob-archives '*_*-nextcloud-aio' "${BORG_PRUNE_OPTS[@]}"; then
if ! borg prune --prefix '*_*-nextcloud-aio' "${BORG_PRUNE_OPTS[@]}"; then
echo "Failed to prune archives!"
exit 1
fi
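A different retention policy can be fed into this prune step through the BORG_RETENTION_POLICY variable from the Dockerfile above; a sketch with made-up values (assumes BORG_REPO and the passphrase are already exported as in start.sh):
export BORG_RETENTION_POLICY="--keep-within=14d --keep-weekly=8 --keep-monthly=12"
read -ra BORG_PRUNE_OPTS <<< "$BORG_RETENTION_POLICY"
borg prune --stats --glob-archives '*_*-nextcloud-aio' "${BORG_PRUNE_OPTS[@]}"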
# Compact archives
echo "Compacting the archives..."
if ! borg compact; then
echo "Failed to compact archives!"
exit 1
fi
# Back up additional directories of the host
if [ "$ADDITIONAL_DIRECTORIES_BACKUP" = 'yes' ]; then
if [ -d "/docker_volumes/" ]; then
@ -256,23 +160,17 @@ if [ "$BORG_MODE" = backup ]; then
exit 1
fi
done
echo "Starting the backup for additional volumes..."
if ! borg create "${BORG_OPTS[@]}" "::$CURRENT_DATE-additional-docker-volumes" "/docker_volumes/"; then
if ! borg create "${BORG_OPTS[@]}" "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-additional-docker-volumes" "/docker_volumes/"; then
echo "Deleting the failed backup archive..."
borg delete --stats "::$CURRENT_DATE-additional-docker-volumes"
borg delete --stats --progress "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-additional-docker-volumes"
echo "Backup of additional docker-volumes failed!"
exit 1
fi
echo "Pruning additional volumes..."
if ! borg prune --stats --glob-archives '*_*-additional-docker-volumes' "${BORG_PRUNE_OPTS[@]}"; then
if ! borg prune --prefix '*_*-additional-docker-volumes' "${BORG_PRUNE_OPTS[@]}"; then
echo "Failed to prune additional docker-volumes archives!"
exit 1
fi
echo "Compacting additional volumes..."
if ! borg compact; then
echo "Failed to compact additional docker-volume archives!"
exit 1
fi
fi
if [ -d "/host_mounts/" ]; then
EXCLUDED_DIRECTORIES=(home/*/.cache root/.cache var/cache lost+found run var/run dev tmp sys proc)
@ -286,29 +184,22 @@ if [ "$BORG_MODE" = backup ]; then
do
EXCLUDE_DIRS+=(--exclude "/host_mounts/$directory/")
done
echo "Starting the backup for additional host mounts..."
if ! borg create "${BORG_OPTS[@]}" "${EXCLUDE_DIRS[@]}" "::$CURRENT_DATE-additional-host-mounts" "/host_mounts/"; then
if ! borg create "${BORG_OPTS[@]}" "${EXCLUDE_DIRS[@]}" "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-additional-host-mounts" "/host_mounts/"; then
echo "Deleting the failed backup archive..."
borg delete --stats "::$CURRENT_DATE-additional-host-mounts"
borg delete --stats --progress "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-additional-host-mounts"
echo "Backup of additional host-mounts failed!"
exit 1
fi
echo "Pruning additional host mounts..."
if ! borg prune --stats --glob-archives '*_*-additional-host-mounts' "${BORG_PRUNE_OPTS[@]}"; then
if ! borg prune --prefix '*_*-additional-host-mounts' "${BORG_PRUNE_OPTS[@]}"; then
echo "Failed to prune additional host-mount archives!"
exit 1
fi
echo "Compacting additional host mounts..."
if ! borg compact; then
echo "Failed to compact additional host-mount archives!"
exit 1
fi
fi
fi
# Inform user
get_expiration_time
echo "Backup finished successfully on $END_DATE_READABLE ($DURATION_READABLE)."
echo "Backup finished successfully on $END_DATE_READABLE ($DURATION_READABLE)"
if [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/update.failed" ]; then
echo "However a Nextcloud update failed. So reporting that the backup failed which will skip any update attempt the next time."
echo "Please restore a backup from before the failed Nextcloud update attempt."
@ -321,38 +212,17 @@ fi
if [ "$BORG_MODE" = restore ]; then
get_start_time
# Pick archive to restore
# Perform the restore
if [ -n "$SELECTED_RESTORE_TIME" ]; then
SELECTED_ARCHIVE="$(borg list | grep "nextcloud-aio" | grep "$SELECTED_RESTORE_TIME" | awk -F " " '{print $1}' | head -1)"
SELECTED_ARCHIVE="$(borg list "$BORG_BACKUP_DIRECTORY" | grep "nextcloud-aio" | grep "$SELECTED_RESTORE_TIME" | awk -F " " '{print $1}' | head -1)"
else
SELECTED_ARCHIVE="$(borg list | grep "nextcloud-aio" | awk -F " " '{print $1}' | sort -r | head -1)"
SELECTED_ARCHIVE="$(borg list "$BORG_BACKUP_DIRECTORY" | grep "nextcloud-aio" | awk -F " " '{print $1}' | sort -r | head -1)"
fi
echo "Restoring '$SELECTED_ARCHIVE'..."
ADDITIONAL_RSYNC_EXCLUDES=()
ADDITIONAL_BORG_EXCLUDES=()
ADDITIONAL_FIND_EXCLUDES=()
# Exclude datadir if .noaiobackup file was found
# shellcheck disable=SC2144
if [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/.noaiobackup" ]; then
# Keep these 3 in sync. Beware, the pattern syntax and the paths differ
ADDITIONAL_RSYNC_EXCLUDES=(--exclude "nextcloud_aio_nextcloud_data/**")
ADDITIONAL_BORG_EXCLUDES=(--exclude "sh:nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/**")
ADDITIONAL_FIND_EXCLUDES=(-o -regex 'nextcloud_aio_volumes/nextcloud_aio_nextcloud_data\(/.*\)?')
echo "⚠️⚠️⚠️ '.noaiobackup' file was found in Nextcloud's data directory. Excluding the data directory from restore!"
echo "You might run into problems due to this afterwards as potentially this makes the directory go out of sync with the database."
echo "You might be able to fix this by running 'occ files:scan --all' and 'occ maintenance:repair' and 'occ files:scan-app-data' after the restore."
echo "See https://github.com/nextcloud/all-in-one#how-to-run-occ-commands"
# Exclude previews from restore if selected to speed up process or exclude preview folder if .noaiobackup file was found
elif [ -n "$RESTORE_EXCLUDE_PREVIEWS" ] || [ -f /nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_*/preview/.noaiobackup ]; then
# Keep these 3 in sync. Beware, the pattern syntax and the paths differ
ADDITIONAL_RSYNC_EXCLUDES=(--exclude "nextcloud_aio_nextcloud_data/appdata_*/preview/**")
ADDITIONAL_BORG_EXCLUDES=(--exclude "sh:nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_*/preview/**")
ADDITIONAL_FIND_EXCLUDES=(-o -regex 'nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_[^/]*/preview\(/.*\)?')
echo "⚠️⚠️⚠️ Excluding previews from restore!"
echo "You might run into problems due to this afterwards as potentially this makes the directory go out of sync with the database."
echo "You might be able to fix this by running 'occ files:scan-app-data preview' after the restore."
echo "See https://github.com/nextcloud/all-in-one#how-to-run-occ-commands"
mkdir -p /tmp/borg
if ! borg mount "$BORG_BACKUP_DIRECTORY::$SELECTED_ARCHIVE" /tmp/borg; then
echo "Could not mount the backup!"
exit 1
fi
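The occ repair commands referenced in the warnings above could be run once the instance is back up; a sketch following the usual AIO pattern of executing occ inside the Nextcloud container (see the linked how-to for the authoritative steps):
sudo docker exec --user www-data nextcloud-aio-nextcloud php occ files:scan --all
sudo docker exec --user www-data nextcloud-aio-nextcloud php occ maintenance:repair
sudo docker exec --user www-data nextcloud-aio-nextcloud php occ files:scan-app-data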
# Save Additional Backup dirs
@ -365,12 +235,23 @@ if [ "$BORG_MODE" = restore ]; then
DAILY_BACKUPTIME="$(cat /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_time)"
fi
# Restore everything except the configuration file
if ! rsync --stats --archive --human-readable -vv --delete \
--exclude "nextcloud_aio_mastercontainer/session/"** \
--exclude "nextcloud_aio_mastercontainer/certs/"** \
--exclude "nextcloud_aio_mastercontainer/data/daily_backup_running" \
--exclude "nextcloud_aio_mastercontainer/data/configuration.json" \
/tmp/borg/nextcloud_aio_volumes/ /nextcloud_aio_volumes; then
echo "Something failed while restoring from backup."
umount /tmp/borg
exit 1
fi
# Save current aio password
AIO_PASSWORD="$(jq '.password' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
# Save current backup location vars
# Save current path
BORG_LOCATION="$(jq '.borg_backup_host_location' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
REMOTE_REPO="$(jq '.borg_remote_repo' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
# Save current nextcloud datadir
if grep -q '"nextcloud_datadir":' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json; then
@ -379,116 +260,22 @@ if [ "$BORG_MODE" = restore ]; then
NEXTCLOUD_DATADIR='""'
fi
if [ -z "$BORG_REMOTE_REPO" ]; then
mkdir -p /tmp/borg
if ! borg mount "::$SELECTED_ARCHIVE" /tmp/borg; then
echo "Could not mount the backup!"
exit 1
fi
# Restore everything except the configuration file
#
# These exclude patterns need to be kept in sync with the borg_excludes file and the find excludes in this file,
# which use a different syntax (patterns appear in 3 places in total)
if ! rsync --stats --archive --human-readable -vv --delete \
--exclude "nextcloud_aio_apache/caddy/**" \
--exclude "nextcloud_aio_mastercontainer/caddy/**" \
--exclude "nextcloud_aio_nextcloud/data/nextcloud.log*" \
--exclude "nextcloud_aio_nextcloud/data/audit.log" \
--exclude "nextcloud_aio_mastercontainer/certs/**" \
--exclude "nextcloud_aio_mastercontainer/data/configuration.json" \
--exclude "nextcloud_aio_mastercontainer/data/daily_backup_running" \
--exclude "nextcloud_aio_mastercontainer/data/session_date_file" \
--exclude "nextcloud_aio_mastercontainer/session/**" \
--exclude "nextcloud_aio_nextcloud_data/lost+found" \
"${ADDITIONAL_RSYNC_EXCLUDES[@]}" \
/tmp/borg/nextcloud_aio_volumes/ /nextcloud_aio_volumes/; then
RESTORE_FAILED=1
echo "Something failed while restoring from backup."
fi
# Restore the configuration file
if ! rsync --archive --human-readable -vv \
/tmp/borg/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json \
/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json; then
RESTORE_FAILED=1
echo "Something failed while restoring the configuration.json."
fi
if ! umount /tmp/borg; then
echo "Failed to unmount the borg archive but should still be able to restore successfully"
fi
else
# Restore nearly everything
#
# borg mount is really slow for remote repos (did not check whether it's slow for local repos too),
# using extract to /tmp would require temporarily storing a second copy of the data.
# So instead extract directly on top of the destination with exclude patterns for the config, but
# then we do still need to delete local files which are not present in the archive.
#
# Older backups may still contain files we've since excluded, so we have to exclude on extract as well.
cd / # borg extract has no destination arg and extracts to CWD
if ! borg extract "::$SELECTED_ARCHIVE" --progress --exclude-from /borg_excludes "${ADDITIONAL_BORG_EXCLUDES[@]}" --pattern '+nextcloud_aio_volumes/**'
then
RESTORE_FAILED=1
echo "Failed to extract backup archive."
else
# Delete files/dirs present locally, but not in the backup archive, excluding conf files
# https://unix.stackexchange.com/a/759341
# This comm does not support -z, but I doubt any file names would have \n in them
#
# These find patterns need to be kept in sync with the borg_excludes file and the rsync excludes in this
# file, which use a different syntax (patterns appear in 3 places in total)
echo "Deleting local files which do not exist in the backup"
if ! find nextcloud_aio_volumes \
-not \( \
-path nextcloud_aio_volumes/nextcloud_aio_apache/caddy \
-o -path "nextcloud_aio_volumes/nextcloud_aio_apache/caddy/*" \
-o -path nextcloud_aio_volumes/nextcloud_aio_mastercontainer/caddy \
-o -path "nextcloud_aio_volumes/nextcloud_aio_mastercontainer/caddy/*" \
-o -path nextcloud_aio_volumes/nextcloud_aio_mastercontainer/certs \
-o -path "nextcloud_aio_volumes/nextcloud_aio_mastercontainer/certs/*" \
-o -path nextcloud_aio_volumes/nextcloud_aio_mastercontainer/session \
-o -path "nextcloud_aio_volumes/nextcloud_aio_mastercontainer/session/*" \
-o -path "nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/nextcloud.log*" \
-o -path nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/audit.log \
-o -path nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_running \
-o -path nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/session_date_file \
-o -path "nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/id_borg*" \
-o -path "nextcloud_aio_nextcloud_data/lost+found" \
"${ADDITIONAL_FIND_EXCLUDES[@]}" \
\) \
| LC_ALL=C sort \
| LC_ALL=C comm -23 - \
<(borg list "::$SELECTED_ARCHIVE" --short --exclude-from /borg_excludes --pattern '+nextcloud_aio_volumes/**' | LC_ALL=C sort) \
> /tmp/local_files_not_in_backup
then
RESTORE_FAILED=1
echo "Failed to delete local files not in backup archive."
else
# More robust than e.g. xargs as I got a ~"args line too long" error while testing that, but it's slower
# https://stackoverflow.com/a/21848934
while IFS= read -r file
do rm -vrf -- "$file" || DELETE_FAILED=1
done < /tmp/local_files_not_in_backup
if [ "$DELETE_FAILED" = 1 ]; then
RESTORE_FAILED=1
echo "Failed to delete (some) local files not in backup archive."
fi
fi
fi
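The comm -23 trick used above can be illustrated in isolation (throwaway files; comm needs sorted input, hence the LC_ALL=C sort in the real code):
printf 'a\nb\nc\n' > /tmp/local_list     # paths that exist locally
printf 'a\nc\n' > /tmp/archive_list      # paths that exist in the backup archive
LC_ALL=C comm -23 /tmp/local_list /tmp/archive_list   # prints only "b": local but missing from the archive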
# Restore the configuration file
if ! rsync --archive --human-readable -vv \
/tmp/borg/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json \
/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json; then
echo "Something failed while restoring the configuration.json."
umount /tmp/borg
exit 1
fi
# Set backup-mode to restore since it was a restore
CONTENTS="$(jq '."backup-mode" = "restore"' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
echo -E "${CONTENTS}" > /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json
# Reset the backup location vars to the currently used one
# Reset the backup path to the currently used one
CONTENTS="$(jq ".borg_backup_host_location = $BORG_LOCATION" /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
echo -E "${CONTENTS}" > /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json
CONTENTS="$(jq ".borg_remote_repo = $REMOTE_REPO" /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
echo -E "${CONTENTS}" > /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json
# Reset the AIO password to the currently used one
CONTENTS="$(jq ".password = $AIO_PASSWORD" /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
@ -512,13 +299,11 @@ if [ "$BORG_MODE" = restore ]; then
chmod 770 "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_time"
fi
if [ "$RESTORE_FAILED" = 1 ]; then
exit 1
fi
umount /tmp/borg
# Inform user
get_expiration_time
echo "Restore finished successfully on $END_DATE_READABLE ($DURATION_READABLE)."
echo "Restore finished successfully on $END_DATE_READABLE ($DURATION_READABLE)"
# Add file to Nextcloud container so that it skips any update the next time
touch "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/skip.update"
@ -527,15 +312,6 @@ if [ "$BORG_MODE" = restore ]; then
# Add file to Nextcloud container so that it performs a fingerprint update the next time
touch "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/fingerprint.update"
chmod 777 "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/fingerprint.update"
# Add file to Nextcloud container to trigger a preview scan the next time it starts
if [ -n "$RESTORE_EXCLUDE_PREVIEWS" ]; then
touch "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/trigger-preview.scan"
chmod 777 "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/trigger-preview.scan"
fi
# Delete redis cache
rm -f "/mnt/redis/dump.rdb"
fi
# Do the Backup check
@ -544,80 +320,37 @@ if [ "$BORG_MODE" = check ]; then
echo "Checking the backup integrity..."
# Perform the check
if ! borg check -v --verify-data; then
if ! borg check --verify-data --progress "$BORG_BACKUP_DIRECTORY"; then
echo "Some errors were found while checking the backup integrity!"
echo "Check the AIO interface for advice on how to proceed now!"
exit 1
fi
# Inform user
get_expiration_time
echo "Check finished successfully on $END_DATE_READABLE ($DURATION_READABLE)."
exit 0
fi
# Do the Backup check-repair
if [ "$BORG_MODE" = "check-repair" ]; then
get_start_time
echo "Checking the backup integrity and repairing it..."
# Perform the check-repair
if ! echo YES | borg check -v --repair; then
echo "Some errors were found while checking and repairing the backup integrity!"
exit 1
fi
# Inform user
get_expiration_time
echo "Check finished successfully on $END_DATE_READABLE ($DURATION_READABLE)."
echo "Check finished successfully on $END_DATE_READABLE ($DURATION_READABLE)"
exit 0
fi
# Do the backup test
if [ "$BORG_MODE" = test ]; then
if [ -n "$BORG_REMOTE_REPO" ]; then
if ! borg info > /dev/null; then
echo "Borg could not get info from the remote repo."
echo "See the above borg info output for details."
exit 1
fi
else
if ! [ -d "$BORG_BACKUP_DIRECTORY" ]; then
echo "No 'borg' directory in the given backup directory found!"
echo "Only the files/folders below have been found in the given directory."
ls -a "$MOUNT_DIR"
echo "Please adjust the directory so that the borg archive is positioned in a folder named 'borg' inside the given directory!"
exit 1
elif ! [ -f "$BORG_BACKUP_DIRECTORY/config" ]; then
echo "A 'borg' directory was found but could not find the borg archive."
echo "Only the files/folders below have been found in the borg directory."
ls -a "$BORG_BACKUP_DIRECTORY"
echo "The archive and most importantly the config file must be positioned directly in the 'borg' subfolder."
exit 1
fi
fi
if ! borg list >/dev/null; then
if ! [ -d "$BORG_BACKUP_DIRECTORY" ]; then
echo "No 'borg' directory in the given backup directory found!"
echo "Only the files/folders below have been found in the given directory."
ls -a "$MOUNT_DIR"
echo "Please adjust the directory so that the borg archive is positioned in a folder named 'borg' inside the given directory!"
exit 1
elif ! [ -f "$BORG_BACKUP_DIRECTORY/config" ]; then
echo "A 'borg' directory was found but could not find the borg archive."
echo "Only the files/folders below have been found in the borg directory."
ls -a "$BORG_BACKUP_DIRECTORY"
echo "The archive and most importantly the config file must be positioned directly in the 'borg' subfolder."
exit 1
elif ! borg list "$BORG_BACKUP_DIRECTORY"; then
echo "The entered path seems to be valid but could not open the backup archive."
echo "Most likely the entered password was wrong so please adjust it accordingly!"
exit 1
else
if ! borg list | grep "nextcloud-aio"; then
echo "The backup archive does not contain a valid Nextcloud AIO backup."
echo "Most likely was the archive not created via Nextcloud AIO."
exit 1
else
echo "Everything looks fine so feel free to continue!"
exit 0
fi
echo "Everything looks fine so feel free to continue!"
exit 0
fi
fi
if [ "$BORG_MODE" = list ]; then
echo "Updating backup list..."
if ! borg info > /dev/null; then
echo "Could not update the backup list."
exit 1
fi
# The update gets done automatically in the wrapper start.sh script.
fi

View file

@ -1,11 +0,0 @@
# These patterns need to be kept in sync with rsync and find excludes in backupscript.sh,
# which use a different syntax (patterns appear in 3 places in total)
nextcloud_aio_volumes/nextcloud_aio_apache/caddy/
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/caddy/
nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/nextcloud.log*
nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/audit.log
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/certs/
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_running
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/session_date_file
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/session/
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/id_borg*

View file

@ -2,7 +2,7 @@
# Variables
export MOUNT_DIR="/mnt/borgbackup"
export BORG_BACKUP_DIRECTORY="$MOUNT_DIR/borg" # necessary even when remote to store the aio-lockfile
export BORG_BACKUP_DIRECTORY="$MOUNT_DIR/borg"
# Validate BORG_PASSWORD
if [ -z "$BORG_PASSWORD" ] && [ -z "$BACKUP_RESTORE_PASSWORD" ]; then
@ -18,22 +18,10 @@ else
fi
export BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes
export BORG_RELOCATED_REPO_ACCESS_IS_OK=yes
if [ -n "$BORG_REMOTE_REPO" ]; then
export BORG_REPO="$BORG_REMOTE_REPO"
# Location to create the borg ssh pub/priv key
export BORGBACKUP_KEY="/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/id_borg"
# Accept any host key the first time we connect to the remote. Strictly speaking, the key should be provided by the user, but you'd
# have to be very unlucky to get MitM'ed on your first connection.
export BORG_RSH="ssh -o StrictHostKeyChecking=accept-new -i $BORGBACKUP_KEY"
else
export BORG_REPO="$BORG_BACKUP_DIRECTORY"
fi
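For reference, BORG_REMOTE_REPO is passed straight to borg as BORG_REPO, so it would normally use borg's ssh repository URL syntax; an illustrative value with placeholder host, user, port and path:
export BORG_REMOTE_REPO="ssh://borg@backup.example.com:22/./nextcloud-aio-repo"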
# Validate BORG_MODE
if [ "$BORG_MODE" != backup ] && [ "$BORG_MODE" != restore ] && [ "$BORG_MODE" != check ] && [ "$BORG_MODE" != "check-repair" ] && [ "$BORG_MODE" != "test" ] && [ "$BORG_MODE" != "list" ]; then
echo "No correct BORG_MODE mode applied. Valid are 'backup', 'check', 'restore', 'test' and 'list'."
if [ "$BORG_MODE" != backup ] && [ "$BORG_MODE" != restore ] && [ "$BORG_MODE" != check ] && [ "$BORG_MODE" != test ]; then
echo "No correct BORG_MODE mode applied. Valid are 'backup', 'check', 'restore' and 'test'."
exit 1
fi
@ -48,8 +36,8 @@ fi
rm -f "/nextcloud_aio_volumes/nextcloud_aio_database_dump/backup-is-running"
# Get a list of all available borg archives
if borg list &>/dev/null; then
borg list | grep "nextcloud-aio" | awk -F " " '{print $1","$3,$4}' > "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/backup_archives.list"
if borg list "$BORG_BACKUP_DIRECTORY" &>/dev/null; then
borg list "$BORG_BACKUP_DIRECTORY" | grep "nextcloud-aio" | awk -F " " '{print $1","$3,$4}' > "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/backup_archives.list"
else
echo "" > "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/backup_archives.list"
fi

View file

@ -1,38 +1,6 @@
# syntax=docker/dockerfile:latest
FROM alpine:3.23.3
# Probably from this file: https://github.com/Cisco-Talos/clamav/blob/main/Dockerfile
FROM clamav/clamav:0.105.1
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache tzdata clamav clamav-milter supervisor bash; \
mkdir -p /tmp /var/lib/clamav /run/clamav /var/log/supervisord /var/run/supervisord; \
chmod 777 -R /tmp /run/clamav /var/log/clamav /var/log/supervisord /var/run/supervisord; \
chown -R 100:100 /var/lib/clamav; \
sed -i "s|#\?MaxDirectoryRecursion.*|MaxDirectoryRecursion 30|g" /etc/clamav/clamd.conf; \
sed -i "s|#\?MaxScanSize.*|MaxScanSize 2000M|g" /etc/clamav/clamd.conf; \
sed -i "s|#\?MaxFileSize.*|MaxFileSize 2000M|g" /etc/clamav/clamd.conf; \
sed -i "s|#\?PCREMaxFileSize.*|PCREMaxFileSize 2000M|g" /etc/clamav/clamd.conf; \
# StreamMaxLength must be synced with av_stream_max_length inside the Nextcloud files_antivirus plugin
sed -i "s|#\?StreamMaxLength.*|StreamMaxLength 2000M|g" /etc/clamav/clamd.conf; \
sed -i "s|#\?TCPSocket|TCPSocket|g" /etc/clamav/clamd.conf; \
sed -i "s|^LocalSocket .*|LocalSocket /tmp/clamd.sock|g" /etc/clamav/clamd.conf; \
sed -i "s|Example| |g" /etc/clamav/clamav-milter.conf; \
sed -i "s|#\?MilterSocket inet:7357|MilterSocket inet:7357|g" /etc/clamav/clamav-milter.conf; \
sed -i "s|#\?ClamdSocket unix:/run/clamav/clamd.sock|ClamdSocket unix:/tmp/clamd.sock|g" /etc/clamav/clamav-milter.conf; \
sed -i "s|#\?OnInfected Quarantine|OnInfected Reject|g" /etc/clamav/clamav-milter.conf; \
sed -i "s|#\?AddHeader Replace|AddHeader Add|g" /etc/clamav/clamav-milter.conf; \
sed -i "s|#\?Foreground yes|Foreground yes|g" /etc/clamav/clamav-milter.conf
COPY --chmod=775 start.sh /start.sh
COPY --chmod=775 healthcheck.sh /healthcheck.sh
COPY --chmod=664 supervisord.conf /supervisord.conf
USER 100
RUN set -ex; \
freshclam --foreground --stdout
VOLUME /var/lib/clamav
ENTRYPOINT ["/start.sh"]
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
HEALTHCHECK --start-period=60s --retries=9 CMD /healthcheck.sh
RUN apk add --update --no-cache tzdata
COPY clamav.conf /tmp/
RUN cat /tmp/clamav.conf >> /etc/clamav/clamd.conf

View file

@ -0,0 +1,4 @@
MaxDirectoryRecursion 30
MaxFileSize 100M
PCREMaxFileSize 100M
StreamMaxLength 100M
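Both variants cap the scan stream size via StreamMaxLength, which the comment in the main-branch Dockerfile says must match av_stream_max_length in the Nextcloud files_antivirus app; a hedged sketch of a matching occ call (value in bytes, here for the 2000M case; the exact app-config key handling is an assumption):
sudo docker exec --user www-data nextcloud-aio-nextcloud php occ config:app:set files_antivirus av_stream_max_length --value="2097152000"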

View file

@ -1,9 +0,0 @@
#!/bin/bash
if [ "$(echo "PING" | nc 127.0.0.1 3310)" != "PONG" ]; then
echo "ERROR: Unable to contact server"
exit 1
fi
echo "Clamd is up"
exit 0

View file

@ -1,8 +0,0 @@
#!/bin/bash
# Print out clamav version for compliance reasons
clamscan --version
echo "Clamav started"
exec "$@"

View file

@ -1,30 +0,0 @@
[supervisord]
nodaemon=true
nodaemon=true
logfile=/var/log/supervisord/supervisord.log
pidfile=/var/run/supervisord/supervisord.pid
childlogdir=/var/log/supervisord/
logfile_maxbytes=50MB
logfile_backups=10
loglevel=error
[program:freshclam]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=freshclam --foreground --stdout --daemon --daemon-notify=/etc/clamav/clamd.conf
[program:clamd]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=clamd --foreground --config-file=/etc/clamav/clamd.conf
[program:milter]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=clamav-milter --config-file=/etc/clamav/clamav-milter.conf

View file

@ -1,16 +0,0 @@
# syntax=docker/dockerfile:latest
# From https://gitlab.collabora.com/collabora-online/docker
# hadolint ignore=DL3007
FROM registry.gitlab.collabora.com/collabora-online/docker:latest
USER root
ARG DEBIAN_FRONTEND=noninteractive
COPY --chmod=775 healthcheck.sh /healthcheck.sh
USER 1001
HEALTHCHECK --start-period=60s --retries=9 CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"

View file

@ -1,7 +0,0 @@
#!/bin/bash
# Unfortunately, neither curl nor nc is installed in the container,
# and packages cannot be added either because the package list is broken.
# So we always exit 0 for now.
# nc http://127.0.0.1:9980 || exit 1
exit 0

View file

@ -1,15 +1,17 @@
# syntax=docker/dockerfile:latest
# From a file located probably somewhere here: https://github.com/CollaboraOnline/online/blob/master/docker/from-packages/Dockerfile
FROM collabora/code:25.04.8.2.1
# From a file located probably somewhere here: https://github.com/CollaboraOnline/online/tree/master/docker
FROM collabora/code:22.05.5.4.1
USER root
ARG DEBIAN_FRONTEND=noninteractive
COPY --chmod=775 healthcheck.sh /healthcheck.sh
RUN set -ex; \
\
apt-get update; \
export DEBIAN_FRONTEND=noninteractive; \
apt-get install -y --no-install-recommends \
tzdata \
; \
rm -rf /var/lib/apt/lists/*
USER 1001
USER 104
HEALTHCHECK --start-period=60s --retries=9 CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
HEALTHCHECK CMD curl -skfI localhost:9980 || exit 1

View file

@ -1,7 +0,0 @@
#!/bin/bash
# Unfortunately, neither curl nor nc is installed in the container,
# and packages cannot be added either because the package list is broken.
# So we always exit 0 for now.
# nc http://127.0.0.1:9980 || exit 1
exit 0

View file

@ -1,23 +0,0 @@
# syntax=docker/dockerfile:latest
FROM haproxy:3.3.2-alpine
# hadolint ignore=DL3002
USER root
ENV NEXTCLOUD_HOST=nextcloud-aio-nextcloud
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache \
ca-certificates \
tzdata \
bash \
bind-tools; \
chmod -R 777 /tmp
COPY --chmod=775 *.sh /
COPY --chmod=664 haproxy.cfg /haproxy.cfg
ENTRYPOINT ["/start.sh"]
HEALTHCHECK CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"

View file

@ -1,68 +0,0 @@
# Inspiration: https://github.com/Tecnativa/docker-socket-proxy/blob/master/haproxy.cfg
global
maxconn 10
defaults
timeout connect 30s
timeout client 30s
timeout server 1800s
frontend http
mode http
bind :::2375 v4v6
http-request deny unless { src 127.0.0.1 } || { src ::1 } || { src NC_IPV4_PLACEHOLDER } || { src NC_IPV6_PLACEHOLDER }
# docker system _ping
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/_ping$ } METH_GET
# docker inspect image: GET images/%s/json
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/images/.*/json } METH_GET
# container inspect: GET containers/%s/json
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+/json } METH_GET
# container inspect: GET containers/%s/logs
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+/logs } METH_GET
# container start/stop: POST containers/%s/start containers/%s/stop
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+/((start)|(stop)) } METH_POST
# container rm: DELETE containers/%s
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+ } METH_DELETE
# container update/exec: POST containers/%s/update containers/%s/exec
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+/((update)|(exec)) } METH_POST
# container put: PUT containers/%s/archive
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+/archive } METH_PUT
# run exec instance: POST exec/%s
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/exec/[a-zA-Z0-9_.-]+/start } METH_POST
# container create: POST containers/create?name=%s
# ACL to restrict container name to nc_app_[a-zA-Z0-9_.-]+
acl nc_app_container_name url_param(name) -m reg -i "^nc_app_[a-zA-Z0-9_.-]+"
# ACL to restrict the number of Mounts to 1
acl one_mount_volume req.body -m reg -i "\"Mounts\"\s*:\s*\[\s*(?:(?!\"Mounts\"\s*:\s*\[)[^}]*)}[^}]*\]"
# ACL to deny if there are any binds
acl binds_present req.body -m reg -i "\"HostConfig\"\s*:.*\"Binds\"\s*:"
# ACL to restrict the type of Mounts to volume
acl type_not_volume req.body -m reg -i "\"Mounts\"\s*:\s*\[[^\]]*(\"Type\"\s*:\s*\"(?!volume\b)\w+\"[^\]]*)+\]"
http-request deny if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/create } nc_app_container_name !one_mount_volume binds_present type_not_volume METH_POST
# ACL to restrict container creation so that HostConfig.Privileged cannot be set (detected by searching for the "Privileged" word anywhere in the payload)
acl no_privileged_flag req.body -m reg -i "\"Privileged\""
# ACL to allow mount volume with strict pattern for name: nc_app_[a-zA-Z0-9_.-]+_data
acl nc_app_volume_data_only req.body -m reg -i "\"Mounts\"\s*:\s*\[\s*{[^}]*\"Source\"\s*:\s*\"nc_app_[a-zA-Z0-9_.-]+_data\""
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/create } nc_app_container_name !no_privileged_flag nc_app_volume_data_only METH_POST
# end of container create
# volume create: POST volumes/create
# restrict name
acl nc_app_volume_data req.body -m reg -i "\"Name\"\s*:\s*\"nc_app_[a-zA-Z0-9_.-]+_data\""
# do not allow the "device" word to be used, e.g. "--opt device=:/path/to/dir"
acl volume_no_device req.body -m reg -i "\"device\""
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/volumes/create } nc_app_volume_data !volume_no_device METH_POST
# volume rm: DELETE volumes/%s
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/volumes/nc_app_[a-zA-Z0-9_.-]+_data } METH_DELETE
# image pull: POST images/create?fromImage=%s
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/images/create } METH_POST
http-request deny
default_backend dockerbackend
backend dockerbackend
mode http
server dockersocket /var/run/docker.sock
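The allow-list can be exercised with plain HTTP calls; a sketch, assuming the proxy is reachable from the Nextcloud container under its AIO hostname and the caller's address passes the src check:
# Allowed read-only endpoint (Docker's ping) - should print OK:
curl -s http://nextcloud-aio-docker-socket-proxy:2375/_ping; echo
# Not on the allow-list, so HAProxy should answer with a 403:
curl -s -o /dev/null -w '%{http_code}\n' http://nextcloud-aio-docker-socket-proxy:2375/containers/json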

View file

@ -1,4 +0,0 @@
#!/bin/bash
nc -z "$NEXTCLOUD_HOST" 9001 || exit 0
nc -z 127.0.0.1 2375 || exit 1

View file

@ -1,23 +0,0 @@
#!/bin/sh
# Only start container if nextcloud is accessible
while ! nc -z "$NEXTCLOUD_HOST" 9001; do
echo "Waiting for Nextcloud to start..."
sleep 5
done
set -x
IPv4_ADDRESS_NC="$(dig nextcloud-aio-nextcloud IN A +short +search | grep '^[0-9.]\+$' | sort | head -n1)"
HAPROXYFILE="$(sed "s|NC_IPV4_PLACEHOLDER|$IPv4_ADDRESS_NC|" /haproxy.cfg)"
echo "$HAPROXYFILE" > /tmp/haproxy.cfg
IPv6_ADDRESS_NC="$(dig nextcloud-aio-nextcloud AAAA +short +search | grep '^[0-9a-f:]\+$' | sort | head -n1)"
if [ -n "$IPv6_ADDRESS_NC" ]; then
HAPROXYFILE="$(sed "s|NC_IPV6_PLACEHOLDER|$IPv6_ADDRESS_NC|" /tmp/haproxy.cfg)"
else
HAPROXYFILE="$(sed "s# || { src NC_IPV6_PLACEHOLDER }##g" /tmp/haproxy.cfg)"
fi
echo "$HAPROXYFILE" > /tmp/haproxy.cfg
set +x
haproxy -f /tmp/haproxy.cfg -db

View file

@ -1,22 +1,18 @@
# syntax=docker/dockerfile:latest
FROM alpine:3.23.3
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache bash lighttpd netcat-openbsd; \
adduser -S www-data -G www-data; \
rm -rf /etc/lighttpd/lighttpd.conf; \
chmod 777 -R /etc/lighttpd; \
mkdir -p /var/www/domaincheck; \
chown www-data:www-data -R /var/www; \
chmod 777 -R /var/www/domaincheck
COPY --chown=www-data:www-data lighttpd.conf /lighttpd.conf
FROM alpine:3.16.2
RUN apk add --update --no-cache lighttpd bash curl
COPY --chmod=775 start.sh /start.sh
RUN adduser -S www-data -G www-data
RUN rm -rf /etc/lighttpd/lighttpd.conf
COPY lighttpd.conf /etc/lighttpd/lighttpd.conf
RUN chmod +r -R /etc/lighttpd && \
chown www-data:www-data -R /var/www && \
chown www-data:www-data /etc/lighttpd/lighttpd.conf
COPY start.sh /
RUN chmod +x /start.sh
USER www-data
RUN mkdir -p /var/www/domaincheck/
ENTRYPOINT ["/start.sh"]
HEALTHCHECK CMD nc -z 127.0.0.1 $APACHE_PORT || exit 1
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
HEALTHCHECK CMD curl -skfI localhost:$APACHE_PORT || exit 1

View file

@ -11,7 +11,7 @@ if [ -z "$APACHE_PORT" ]; then
export APACHE_PORT="443"
fi
CONF_FILE="$(sed "s|ipv6-placeholder|\[::\]:$APACHE_PORT|" /lighttpd.conf)"
CONF_FILE="$(sed "s|ipv6-placeholder|\[::\]:$APACHE_PORT|" /etc/lighttpd/lighttpd.conf)"
echo "$CONF_FILE" > /etc/lighttpd/lighttpd.conf
# Check config file

View file

@ -1,27 +1,6 @@
# syntax=docker/dockerfile:latest
# Probably from here https://github.com/elastic/elasticsearch/blob/main/distribution/docker/src/docker/Dockerfile
FROM elasticsearch:8.19.10
FROM elasticsearch:7.17.6
USER root
RUN elasticsearch-plugin install --batch ingest-attachment
ARG DEBIAN_FRONTEND=noninteractive
# hadolint ignore=DL3008
RUN set -ex; \
\
apt-get update; \
apt-get upgrade -y; \
apt-get install -y --no-install-recommends \
tzdata \
; \
rm -rf /var/lib/apt/lists/*;
COPY --chmod=775 healthcheck.sh /healthcheck.sh
USER 1000:0
HEALTHCHECK --interval=10s --timeout=5s --start-period=1m --retries=5 CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
ENV ES_JAVA_OPTS="-Xms512M -Xmx512M"
HEALTHCHECK CMD curl -skfI localhost:9200 || exit 1

View file

@ -1,3 +0,0 @@
#!/bin/bash
nc -z 127.0.0.1 9200 || exit 1

View file

@ -1,47 +1,15 @@
# syntax=docker/dockerfile:latest
FROM golang:1.25.6-alpine3.23 AS go
ENV IMAGINARY_HASH=6a274b488759a896aff02f52afee6e50b5e3a3ee
# From https://github.com/h2non/imaginary/blob/master/Dockerfile
FROM nextcloud/imaginary:20220822
USER root
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache \
vips-dev \
vips-magick \
vips-heif \
vips-jxl \
vips-poppler \
build-base; \
go install github.com/h2non/imaginary@"$IMAGINARY_HASH";
FROM alpine:3.23.3
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache \
tzdata \
\
apt-get update; \
apt-get install -y --no-install-recommends \
ca-certificates \
netcat-openbsd \
vips \
vips-magick \
vips-heif \
vips-jxl \
vips-poppler \
ttf-dejavu \
bash
curl \
; \
rm -rf /var/lib/apt/lists/*
USER nobody
COPY --from=go /go/bin/imaginary /usr/local/bin/imaginary
COPY --chmod=775 start.sh /start.sh
COPY --chmod=775 healthcheck.sh /healthcheck.sh
ENV PORT=9000
USER 65534
# https://github.com/h2non/imaginary#memory-issues
ENV MALLOC_ARENA_MAX=2
ENTRYPOINT ["/start.sh"]
HEALTHCHECK CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
HEALTHCHECK CMD curl -skI 127.0.0.1:9000 || exit 1

View file

@ -1,3 +0,0 @@
#!/bin/bash
nc -z 127.0.0.1 "$PORT" || exit 1

View file

@ -1,8 +0,0 @@
#!/bin/bash
echo "Imaginary has started"
if [ -z "$IMAGINARY_SECRET" ]; then
imaginary -return-size -max-allowed-resolution 222.2 "$@"
else
imaginary -return-size -max-allowed-resolution 222.2 -key "$IMAGINARY_SECRET" "$@"
fi

8
Containers/mastercontainer/.idea/.gitignore generated vendored Normal file
View file

@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
# Editor-based HTTP Client requests
/httpRequests/

View file

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

View file

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager">
<output url="file://$PROJECT_DIR$/out" />
</component>
</project>

View file

@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/mastercontainer.iml" filepath="$PROJECT_DIR$/.idea/mastercontainer.iml" />
</modules>
</component>
</project>

View file

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$/../.." vcs="Git" />
</component>
</project>

View file

@ -6,32 +6,17 @@
storage file_system {
root /mnt/docker-aio-config/caddy/
}
log {
level ERROR
}
servers {
protocols h1 h2 h2c
}
on_demand_tls {
ask http://127.0.0.1:9876/
}
}
http://:80 {
redir https://{host}{uri} permanent
redir https://{host}{uri}
}
https://:8443 {
reverse_proxy 127.0.0.1:8000
reverse_proxy localhost:8000
tls {
on_demand
issuer acme {
disable_tlsalpn_challenge
}
}
}

View file

@ -1,138 +1,105 @@
# syntax=docker/dockerfile:latest
# Docker CLI is a requirement
FROM docker:29.2.0-cli AS docker
FROM docker:20.10.17-dind-alpine3.16 as dind
# Caddy is a requirement
FROM caddy:2.10.2-alpine AS caddy
FROM caddy:2.5.2-alpine as caddy
# From https://github.com/docker-library/php/blob/master/8.4/alpine3.23/fpm/Dockerfile
FROM php:8.4.17-fpm-alpine3.23
# From https://github.com/docker-library/php/blob/master/8.0/bullseye/apache/Dockerfile
FROM php:8.0.22-apache-bullseye
EXPOSE 80
EXPOSE 8080
EXPOSE 8443
# Overwrite home variable for subservices
ENV HOME=/var/www
RUN mkdir -p /mnt/docker-aio-config/;
COPY --from=caddy /usr/bin/caddy /usr/bin/caddy
COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker
VOLUME /mnt/docker-aio-config/
COPY community-containers /var/www/docker-aio/community-containers
COPY php /var/www/docker-aio/php
COPY --chmod=775 Containers/mastercontainer/*.sh /
COPY --chmod=664 Containers/mastercontainer/Caddyfile /Caddyfile
COPY --chmod=664 Containers/mastercontainer/supervisord.conf /supervisord.conf
COPY Containers/mastercontainer/mastercontainer.conf /etc/apache2/sites-available/mastercontainer.conf
RUN mkdir -p /var/www/docker-aio;
WORKDIR /var/www/docker-aio
# hadolint ignore=SC2086,DL3047,DL3003,DL3004
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache shadow; \
groupmod -g 33 www-data; \
usermod -u 33 -g 33 www-data; \
\
apk add --no-cache \
util-linux-misc \
ca-certificates \
wget \
bash \
apache2 \
apache2-proxy \
apache2-ssl \
RUN apt-get update; \
apt-get install -y --no-install-recommends \
git \
supervisor \
openssl \
sudo \
netcat-openbsd \
curl \
grep; \
\
apk add --no-cache --virtual .build-deps \
autoconf \
build-base; \
pecl install APCu-5.1.28; \
docker-php-ext-enable apcu; \
rm -r /tmp/pear; \
runDeps="$( \
scanelf --needed --nobanner --format '%n#p' --recursive /usr/local/lib/php/extensions \
| tr ',' '\n' \
| sort -u \
| awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
)"; \
apk add --no-cache --virtual .nextcloud-aio-rundeps $runDeps; \
apk del .build-deps; \
grep -q '^pm = dynamic' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's/^pm = dynamic/pm = ondemand/' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's/^pm.max_children =.*/pm.max_children = 80/' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's|access.log = /proc/self/fd/2|access.log = /proc/self/fd/1|' /usr/local/etc/php-fpm.d/docker.conf; \
grep -q ';listen.allowed_clients' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's|;listen.allowed_clients.*|listen.allowed_clients = 127.0.0.1,::1|' /usr/local/etc/php-fpm.d/www.conf; \
\
apk add --no-cache git; \
wget https://getcomposer.org/installer -O - | php -- --install-dir=/usr/local/bin --filename=composer; \
chmod +x /usr/local/bin/composer; \
dpkg-dev \
netcat \
; \
rm -rf /var/lib/apt/lists/*
COPY --from=caddy /usr/bin/caddy /usr/bin/
RUN chmod +x /usr/bin/caddy
COPY --from=dind /usr/local/bin/docker /usr/local/bin/
RUN chmod +x /usr/local/bin/docker
RUN set -ex; \
pecl install APCu-5.1.21; \
docker-php-ext-enable apcu
RUN set -e && \
curl -sS https://getcomposer.org/installer | php && \
mv composer.phar /usr/local/bin/composer && \
chmod +x /usr/local/bin/composer && \
cd /var/www/docker-aio; \
rm -r ./php/tests; \
chown www-data:www-data -R /var/www/docker-aio; \
git clone https://github.com/nextcloud-releases/all-in-one.git --depth 1 .; \
cd php; \
sudo -E -u www-data composer install --no-dev; \
sudo -E -u www-data composer clear-cache; \
composer install --no-dev; \
composer clearcache; \
cd ..; \
rm -f /usr/local/bin/composer; \
chmod -R 770 /var/www/docker-aio; \
chown -R www-data:www-data /var/www; \
rm -r php/data; \
rm -r php/session; \
\
mkdir -p /etc/apache2/certs; \
cd /etc/apache2/certs; \
openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 -subj "/C=DE/ST=BE/L=Local/O=Dev/CN=nextcloud.local" -keyout /etc/apache2/certs/ssl.key -out /etc/apache2/certs/ssl.crt; \
\
sed -i \
-e '/^Listen /d' \
-e 's/^LogLevel .*/LogLevel error/' \
-e 's|^ErrorLog .*|ErrorLog /proc/self/fd/2|' \
-e 's/User apache/User www-data/g' \
-e 's/Group apache/Group www-data/g' \
-e 's/^#\(LoadModule .*mod_rewrite.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_headers.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_env.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_mime.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_dir.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_authz_core.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_mpm_event.so\)/\1/' \
-e 's/\(LoadModule .*mod_mpm_worker.so\)/#\1/' \
-e 's/\(LoadModule .*mod_mpm_prefork.so\)/#\1/' \
-e 's/\(ScriptAlias \)/#\1/' \
/etc/apache2/httpd.conf; \
mkdir -p /etc/apache2/logs; \
rm /etc/apache2/conf.d/ssl.conf; \
echo "ServerName localhost" | tee -a /etc/apache2/httpd.conf; \
grep -q '^LoadModule lbmethod_heartbeat_module' /etc/apache2/conf.d/proxy.conf; \
sed -i 's|^LoadModule lbmethod_heartbeat_module.*|#LoadModule lbmethod_heartbeat_module|' /etc/apache2/conf.d/proxy.conf; \
echo "SSLSessionCache nonenotnull" | tee -a /etc/apache2/httpd.conf; \
echo "LoadModule ssl_module modules/mod_ssl.so" | tee -a /etc/apache2/httpd.conf; \
echo "LoadModule socache_shmcb_module modules/mod_socache_shmcb.so" | tee -a /etc/apache2/httpd.conf; \
echo "Include /etc/apache2/sites-available/mastercontainer.conf" | tee -a /etc/apache2/httpd.conf; \
\
rm -f /etc/apache2/conf.d/default.conf \
/etc/apache2/conf.d/userdir.conf \
/etc/apache2/conf.d/info.conf; \
\
rm -rf /var/www/localhost/cgi-bin/; \
mkdir /var/log/supervisord; \
chmod 770 -R ./; \
chown www-data:www-data -R ./; \
rm -r ./php/data; \
rm -r ./php/session
RUN mkdir -p /etc/apache2/certs && \
cd /etc/apache2/certs && \
openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 -subj "/C=DE/ST=BE/L=Local/O=Dev/CN=nextcloud.local" -keyout ./ssl.key -out ./ssl.crt;
COPY mastercontainer.conf /etc/apache2/sites-available/
RUN a2enmod rewrite \
headers \
env \
mime \
dir \
authz_core \
proxy \
proxy_http \
ssl
RUN rm /etc/apache2/ports.conf; \
sed -s -i -e "s/Include ports.conf//" /etc/apache2/apache2.conf; \
sed -i "/^Listen /d" /etc/apache2/apache2.conf
RUN a2dissite 000-default && \
a2dissite default-ssl && \
a2ensite mastercontainer.conf
RUN mkdir /var/log/supervisord; \
mkdir /var/run/supervisord;
# hadolint ignore=DL3048
LABEL org.label-schema.vendor="Nextcloud" \
wud.watch="false" \
com.docker.compose.project="nextcloud-aio"
COPY Caddyfile /
COPY start.sh /usr/bin/
COPY backup-time-file-watcher.sh /
COPY session-deduplicator.sh /
COPY cron.sh /
COPY daily-backup.sh /
COPY supervisord.conf /
RUN chmod +x /usr/bin/start.sh; \
chmod +x /cron.sh; \
chmod +x /session-deduplicator.sh; \
chmod +x /backup-time-file-watcher.sh; \
chmod +x /daily-backup.sh; \
chmod a+r /Caddyfile
# hadolint ignore=DL3002
USER root
ENTRYPOINT ["/start.sh"]
ENTRYPOINT ["start.sh"]
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]
HEALTHCHECK CMD /healthcheck.sh
HEALTHCHECK CMD curl -skfI https://localhost:8080 || exit 1

View file

@ -1,69 +0,0 @@
# Nextcloud All-in-One `mastercontainer`
This folder contains the OCI/Docker container definition, along with associated resources and
configuration files, for building the `mastercontainer` as part of the Nextcloud All-in-One
project. This container hosts [the Nextcloud AIO interface](
https://github.com/nextcloud/all-in-one/tree/main/php)[^app], and a dedicated PHP environment
for it (which is completely independent of the Nextcloud Server).
## Overview
The mastercontainer acts as the central orchestration service for the deployment and management
of all other containers in the Nextcloud All-in-One stack. It hosts:
- A dedicated PHP SAPI/backend (php-fpm) for AIO itself (not Nextcloud Server)
- An Apache service for accessing the AIO interface via a self-signed HTTPS VirtualHost on 8080/tcp
- A Caddy reverse proxy service enabling HTTPS access to the AIO frontend on port 8443/tcp.
- Caddy will automatically obtain a Let's Encrypt certificate if ports 80 and 8443
are open/forwarded and a domain points to this server; then simply open the Nextcloud AIO interface using that
domain (`https://your-domain-that-points-to-this-server.tld:8443`). The certificate request uses an
[ACME HTTP-01](https://letsencrypt.org/docs/challenge-types/#http-01-challenge) challenge (a quick reachability check is sketched after this list).
- Miscellaneous support services specific to AIO (backup management, health checks, etc.)
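Before relying on the automatic certificate, it can be worth checking that the prerequisites above are actually met. The commands below are a minimal, hedged sketch run from a machine outside your network; the domain is a placeholder and none of this is part of AIO itself:
```bash
# Does the domain resolve to this server's public IP? (placeholder domain)
dig +short your-domain-that-points-to-this-server.tld

# Is port 80 reachable from outside? Caddy needs it for the ACME HTTP-01 challenge.
nc -z -w 5 your-domain-that-points-to-this-server.tld 80 && echo "port 80 reachable"

# Is port 8443 reachable? This is where Caddy serves the AIO interface with the certificate.
nc -z -w 5 your-domain-that-points-to-this-server.tld 8443 && echo "port 8443 reachable"
```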
## Key Responsibilities
- Orchestrates the deployment and lifecycle of all Nextcloud service containers
- Handles initial setup and container configuration
- Coordinates image updates
- Monitors general system health
It triggers the initial installation and ensures the smooth operation of the Nextcloud
All-in-One stack.
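A quick way to see what the mastercontainer is currently managing is to inspect the stack with plain Docker commands. The following is an illustrative sketch; the `sudo` prefix and the `nextcloud-aio-` name prefix follow the conventions used elsewhere in this repository:
```bash
# List all AIO-managed containers and their state; their names start with "nextcloud-aio-".
sudo docker ps -a --filter "name=nextcloud-aio"

# Follow the mastercontainer's own logs, e.g. while it orchestrates updates or backups.
sudo docker logs -f nextcloud-aio-mastercontainer
```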
## Contents
- **Dockerfile**: Instructions for building the mastercontainer image.
- **Entrypoint script**: The `start.sh` script is used for container initialization and runtime
configuration before starting supervisord.
- [**Nextcloud All-in-One Controller App**](https://github.com/nextcloud/all-in-one/tree/main/php): The
core AIO orchestrator that handles configuration and settings for the containers.
- **Supervisor**: The `supervisord.conf` file defines the long-running services hosted within
the container (php-fpm, cron, etc.)
## Usage
This container is the image you launch to deploy the Nextcloud All-in-One
stack in Docker or another OCI-compliant container environment; it then brings up and manages the
remaining containers of the stack. For detailed deployment instructions, refer to the [project documentation](
https://github.com/nextcloud/all-in-one).
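For orientation, a typical invocation looks roughly like the sketch below. Treat it as illustrative only and take the exact, current flags from the main repository README; the image tag and optional environment variables vary between setups.
```bash
# Hedged sketch of the commonly documented run command; check the main README before using it.
# - The container name and volume name below are mandatory (start.sh verifies them).
# - Ports: 80 (ACME HTTP-01), 8080 (AIO interface, self-signed), 8443 (AIO interface via Caddy).
sudo docker run \
  --init \
  --sig-proxy=false \
  --name nextcloud-aio-mastercontainer \
  --restart always \
  --publish 80:80 \
  --publish 8080:8080 \
  --publish 8443:8443 \
  --volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
  --volume /var/run/docker.sock:/var/run/docker.sock:ro \
  ghcr.io/nextcloud-releases/all-in-one:latest
```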
## Related Resources
- [Main Repository](https://github.com/nextcloud/all-in-one)
- [Documentation](https://github.com/nextcloud/all-in-one#readme)
## Contributing
Contributions are welcome! Please follow the Nextcloud project's guidelines and submit pull
requests or issues via the main repository.
## License
This folder and its contents are licensed under the
[GNU AGPLv3](https://www.gnu.org/licenses/agpl-3.0.html), in line with the rest of Nextcloud
All-in-One.
[^app]: The Nextcloud All-in-One interface allows users to install, configure, and
manage their Nextcloud instance and related containers via a secure web interface and API.
It automates and simplifies complex tasks such as container orchestration, backups, updates,
and service management for users deploying Nextcloud in Docker environments.

View file

@ -12,20 +12,15 @@ while true; do
export AUTOMATIC_UPDATES=0
export START_CONTAINERS=1
fi
if [ "$(sed -n '3p' "/mnt/docker-aio-config/data/daily_backup_time")" != 'successNotificationsAreNotEnabled' ]; then
export SEND_SUCCESS_NOTIFICATIONS=1
else
export SEND_SUCCESS_NOTIFICATIONS=0
fi
set +x
if [ -f "/mnt/docker-aio-config/data/daily_backup_running" ]; then
export LOCK_FILE_PRESENT=1
else
export LOCK_FILE_PRESENT=0
fi
else
export BACKUP_TIME="04:00"
export DAILY_BACKUP=0
fi
if [ -f "/mnt/docker-aio-config/data/daily_backup_running" ]; then
export LOCK_FILE_PRESENT=1
else
export LOCK_FILE_PRESENT=0
fi
@ -43,32 +38,14 @@ while true; do
# Make sure to delete the lock file always
rm -f "/mnt/docker-aio-config/data/daily_backup_running"
# Check for updates and send notification if yes on saturdays
if [ "$(date +%u)" = 6 ]; then
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/UpdateNotification.php
fi
# Check if AIO is outdated
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/OutdatedNotification.php
# Check for updates and send notification if yes
sudo -u www-data php /var/www/docker-aio/php/src/Cron/UpdateNotification.php
# Remove sessions older than 24h
find "/mnt/docker-aio-config/session/" -mindepth 1 -mmin +1440 -delete
# Remove nextcloud-aio-domaincheck container
if sudo -E -u www-data docker ps --format "{{.Names}}" --filter "status=exited" | grep -q "^nextcloud-aio-domaincheck$"; then
sudo -E -u www-data docker container remove nextcloud-aio-domaincheck
fi
# Remove dangling images
sudo -E -u www-data docker image prune --filter "label=org.label-schema.vendor=Nextcloud" --force
# Check for available free space
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/CheckFreeDiskSpace.php
# Remove mastercontainer from default bridge network
if sudo -E -u www-data docker inspect nextcloud-aio-mastercontainer --format "{{.NetworkSettings.Networks}}" | grep -q "bridge"; then
sudo -E -u www-data docker network disconnect bridge nextcloud-aio-mastercontainer
fi
sudo -u www-data docker image prune -f
# Wait 60s so that the whole loop will not be executed again
sleep 60

View file

@ -1,42 +1,20 @@
#!/bin/bash
echo "Daily backup script has started"
# Check if initial configuration has been done, otherwise this script should do nothing.
CONFIG_FILE=/mnt/docker-aio-config/data/configuration.json
if ! [ -f "$CONFIG_FILE" ] || (! grep -q "wasStartButtonClicked.*1" "$CONFIG_FILE" && ! grep -q "wasStartButtonClicked.*true" "$CONFIG_FILE"); then
echo "Initial configuration via AIO interface not done yet. Exiting..."
exit 0
fi
# Daily backup and backup check cannot be run at the same time
if [ "$DAILY_BACKUP" = 1 ] && [ "$CHECK_BACKUP" = 1 ]; then
echo "Daily backup and backup check cannot be run at the same time. Exiting..."
exit 1
fi
echo "Daily backup has started"
# Delete all active sessions and create a lock file
# But don't kick out the user if the mastercontainer was just updated since we block the interface either way with the lock file
if [ "$LOCK_FILE_PRESENT" = 0 ] || ! [ -f "/mnt/docker-aio-config/data/daily_backup_running" ]; then
find "/mnt/docker-aio-config/session/" -mindepth 1 -delete
if [ "$LOCK_FILE_PRESENT" = 0 ]; then
rm -f "/mnt/docker-aio-config/session/"*
fi
sudo -E -u www-data touch "/mnt/docker-aio-config/data/daily_backup_running"
sudo -u www-data touch "/mnt/docker-aio-config/data/daily_backup_running"
# Check if apache is running/stopped, watchtower is stopped and backupcontainer is stopped
LOCAL_APACHE_PORT="$(docker inspect nextcloud-aio-apache --format "{{.Config.Env}}" | grep -o 'APACHE_PORT=[0-9]\+' | grep -o '[0-9]\+' | head -1)"
if [ -z "$LOCAL_APACHE_PORT" ]; then
echo "APACHE_PORT is not set which is not expected..."
else
# Connect mastercontainer to nextcloud-aio network to make sure that nextcloud-aio-apache is reachable
# Prevent issues like https://github.com/nextcloud/all-in-one/discussions/5222
docker network connect nextcloud-aio nextcloud-aio-mastercontainer &>/dev/null
# Wait for apache to start
while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-apache$" && ! nc -z nextcloud-aio-apache "$LOCAL_APACHE_PORT"; do
echo "Waiting for apache to become available"
sleep 30
done
fi
APACHE_PORT="$(docker inspect nextcloud-aio-apache --format "{{.HostConfig.PortBindings}}" | grep -oP '[0-9]+' | head -1)"
while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-apache$" && ! nc -z nextcloud-aio-apache "$APACHE_PORT"; do
echo "Waiting for apache to become available"
sleep 30
done
while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-watchtower$"; do
echo "Waiting for watchtower to stop"
sleep 30
@ -48,71 +26,42 @@ done
# Update the mastercontainer
if [ "$AUTOMATIC_UPDATES" = 1 ]; then
echo "Starting mastercontainer update..."
echo "(The script might get exited due to that. In order to update all the other containers correctly, you need to run this script with the same settings a second time.)"
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/UpdateMastercontainer.php
sudo -u www-data php /var/www/docker-aio/php/src/Cron/UpdateMastercontainer.php
fi
# Wait for watchtower to stop
if [ "$AUTOMATIC_UPDATES" = 1 ]; then
if ! docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-watchtower$"; then
echo "Something seems to be wrong: Watchtower should be started at this step."
fi
if [ "$AUTOMATIC_UPDATES" = 1 ] && ! docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-watchtower$"; then
echo "Something seems to be wrong: Watchtower should be started at this step."
else
while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-watchtower$"; do
echo "Waiting for watchtower to stop"
sleep 30
done
fi
# Update container images to reduce downtime later on
if [ "$AUTOMATIC_UPDATES" = 1 ]; then
echo "Updating container images..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/PullContainerImages.php
fi
# Stop containers if required
# shellcheck disable=SC2235
if [ "$CHECK_BACKUP" != 1 ] && ([ "$DAILY_BACKUP" != 1 ] || [ "$STOP_CONTAINERS" = 1 ]); then
echo "Stopping containers..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/StopContainers.php
if [ "$DAILY_BACKUP" != 1 ] || [ "$STOP_CONTAINERS" = 1 ]; then
sudo -u www-data php /var/www/docker-aio/php/src/Cron/StopContainers.php
fi
# Execute the backup itself and some related tasks (also stops the containers)
if [ "$DAILY_BACKUP" = 1 ]; then
echo "Creating daily backup..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/CreateBackup.php
if ! docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-borgbackup$"; then
echo "Something seems to be wrong: the borg container should be started at this step."
fi
while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-borgbackup$"; do
echo "Waiting for backup container to stop"
sleep 30
done
fi
# Execute backup check
if [ "$CHECK_BACKUP" = 1 ]; then
echo "Starting backup check..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/CheckBackup.php
sudo -u www-data php /var/www/docker-aio/php/src/Cron/CreateBackup.php
fi
# Start and/or update containers
if [ "$AUTOMATIC_UPDATES" = 1 ]; then
echo "Starting and updating containers..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/StartAndUpdateContainers.php
sudo -u www-data php /var/www/docker-aio/php/src/Cron/StartAndUpdateContainers.php
else
if [ "$START_CONTAINERS" = 1 ]; then
echo "Starting containers without updating them..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/StartContainers.php
sudo -u www-data php /var/www/docker-aio/php/src/Cron/StartContainers.php
fi
fi
# Delete the lock file
rm -f "/mnt/docker-aio-config/data/daily_backup_running"
# Send backup notification
# shellcheck disable=SC2235
if [ "$DAILY_BACKUP" = 1 ] && ([ "$AUTOMATIC_UPDATES" = 1 ] || [ "$START_CONTAINERS" = 1 ]); then
if [ "$DAILY_BACKUP" = 1 ]; then
# Wait for the nextcloud container to start and send if the backup was successful
if ! docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-nextcloud$"; then
echo "Something seems to be wrong: Nextcloud should be started at this step."
@ -126,8 +75,7 @@ if [ "$DAILY_BACKUP" = 1 ] && ([ "$AUTOMATIC_UPDATES" = 1 ] || [ "$START_CONTAIN
fi
done
fi
echo "Sending backup notification..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/BackupNotification.php
sudo -u www-data php /var/www/docker-aio/php/src/Cron/BackupNotification.php
fi
echo "Daily backup script has finished"
echo "Daily backup has finished"

View file

@ -1,10 +0,0 @@
#!/bin/bash
if [ -f "/mnt/docker-aio-config/data/configuration.json" ]; then
nc -z 127.0.0.1 80 || exit 1
nc -z 127.0.0.1 8000 || exit 1
nc -z 127.0.0.1 8080 || exit 1
nc -z 127.0.0.1 8443 || exit 1
nc -z 127.0.0.1 9000 || exit 1
nc -z 127.0.0.1 9876 || exit 1
fi

View file

@ -1,5 +1,8 @@
Listen 127.0.0.1:8000
Listen 8080 https
Listen 8000
Listen 8080
CustomLog ${APACHE_LOG_DIR}/access.log combined
ErrorLog ${APACHE_LOG_DIR}/error.log
# Deny access to .ht files
<Files ".ht*">
@ -7,19 +10,10 @@ Listen 8080 https
</Files>
# Http host
<VirtualHost 127.0.0.1:8000>
ServerName 127.0.0.1
# Add error log
CustomLog /proc/self/fd/1 proxy
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy
ErrorLog /proc/self/fd/2
ErrorLogFormat "[%t] [%l] [%E] [client: %{X-Forwarded-For}i] [%M] [%{User-Agent}i]"
LogLevel warn
<VirtualHost *:8000>
# PHP match
<FilesMatch "\.php$">
SetHandler "proxy:fcgi://127.0.0.1:9000"
SetHandler application/x-httpd-php
</FilesMatch>
# Master dir
DocumentRoot /var/www/docker-aio/php/public/
@ -41,22 +35,16 @@ Listen 8080 https
# Https host
<VirtualHost *:8080>
# Proxy to https
ProxyPass / http://127.0.0.1:8000/
ProxyPassReverse / http://127.0.0.1:8000/
ProxyPass / http://localhost:8000/
ProxyPassReverse / http://localhost:8000/
ProxyPreserveHost On
# SSL
SSLCertificateKeyFile /etc/apache2/certs/ssl.key
SSLCertificateFile /etc/apache2/certs/ssl.crt
SSLEngine on
SSLProtocol -all +TLSv1.2 +TLSv1.3
SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305
SSLHonorCipherOrder off
SSLSessionTickets off
</VirtualHost>
# Increase timeout in case e.g. the initial download takes a long time
Timeout 7200
ProxyTimeout 7200
# See https://httpd.apache.org/docs/trunk/mod/core.html#traceenable
TraceEnable Off

View file

@ -1,22 +1,23 @@
#!/bin/bash
deduplicate_sessions() {
echo "Deleting duplicate sessions"
find "/mnt/docker-aio-config/session/" -mindepth 1 -exec grep -qv "$NEW_SESSION_TIME" {} \; -delete
}
compare_times() {
if [ -f "/mnt/docker-aio-config/data/session_date_file" ]; then
unset NEW_SESSION_TIME
NEW_SESSION_TIME="$(cat "/mnt/docker-aio-config/data/session_date_file")"
if [ -n "$NEW_SESSION_TIME" ] && [ -n "$OLD_SESSION_TIME" ] && [ "$NEW_SESSION_TIME" != "$OLD_SESSION_TIME" ]; then
deduplicate_sessions
fi
OLD_SESSION_TIME="$NEW_SESSION_TIME"
fi
}
while true; do
compare_times
sleep 2
while [ "$(find "/mnt/docker-aio-config/session/" -mindepth 1 -exec grep "aio_authenticated|[a-z]:1" {} \; | wc -l)" -gt 1 ]; do
unset SESSION_FILES
SESSION_FILES="$(find "/mnt/docker-aio-config/session/" -mindepth 1)"
unset SESSION_FILES_ARRAY
mapfile -t SESSION_FILES_ARRAY <<< "$SESSION_FILES"
for SESSION_FILE in "${SESSION_FILES_ARRAY[@]}"; do
if ! grep -q "aio_authenticated|[a-z]:1" "$SESSION_FILE"; then
rm "$SESSION_FILE"
fi
done
echo "Deleting duplicate sessions"
unset OLDEST_FILE
set -x
# shellcheck disable=SC2012
OLDEST_FILE="$(ls -t "/mnt/docker-aio-config/session/" | tail -1)"
rm "/mnt/docker-aio-config/session/$OLDEST_FILE"
set +x
done
sleep 5
done

309
Containers/mastercontainer/start.sh Normal file → Executable file
View file

@ -6,12 +6,6 @@ print_green() {
printf "%b%s%b\n" "\e[0;92m" "$TEXT" "\e[0m"
}
# Function to show text in red
print_red() {
local TEXT="$1"
printf "%b%s%b\n" "\e[0;31m" "$TEXT" "\e[0m"
}
# Function to check if number was provided
check_if_number() {
case "${1}" in
@ -20,42 +14,18 @@ case "${1}" in
esac
}
# Check if running as root user
if [ "$EUID" != "0" ]; then
print_red "Container does not run as root user. This is not supported."
exit 1
fi
# Check that the CMD is not overwritten nor set
if [ "$*" != "" ]; then
print_red "Docker run command for AIO is incorrect as a CMD option was given which is not expected."
exit 1
fi
# Check if socket is available and readable
if ! [ -e "/var/run/docker.sock" ]; then
print_red "Docker socket is not available. Cannot continue."
echo "Please make sure to mount the docker socket into /var/run/docker.sock inside the container!"
echo "If you did this by purpose because you don't want the container to have access to the docker socket, see https://github.com/nextcloud/all-in-one/tree/main/manual-install."
echo "And https://github.com/nextcloud/all-in-one/blob/main/manual-install/latest.yml"
if ! [ -a "/var/run/docker.sock" ]; then
echo "Docker socket is not available. Cannot continue."
exit 1
elif ! mountpoint -q "/mnt/docker-aio-config"; then
print_red "/mnt/docker-aio-config is not a mountpoint. Cannot proceed!"
echo "Please make sure to mount the nextcloud_aio_mastercontainer docker volume into /mnt/docker-aio-config inside the container!"
echo "If you are on TrueNas SCALE, see https://github.com/nextcloud/all-in-one#can-i-run-aio-on-truenas-scale"
echo "/mnt/docker-aio-config is not a mountpoint. Cannot proceed!"
exit 1
elif mountpoint -q /var/www/docker-aio/php/containers.json; then
print_red "/var/www/docker-aio/php/containers.json is a mountpoint. Cannot proceed!"
echo "This is a not-supported customization of the mastercontainer!"
echo "Please remove this bind-mount from the mastercontainer."
echo "If you need to customize things, feel free to use https://github.com/nextcloud/all-in-one/tree/main/manual-install"
echo "See https://github.com/nextcloud/all-in-one/blob/main/manual-install/latest.yml"
exit 1
elif ! sudo -E -u www-data test -r /var/run/docker.sock; then
elif ! sudo -u www-data test -r /var/run/docker.sock; then
echo "Trying to fix docker.sock permissions internally..."
DOCKER_GROUP=$(stat -c '%G' /var/run/docker.sock)
DOCKER_GROUP_ID=$(stat -c '%g' /var/run/docker.sock)
# Check if a group with the same group name as /var/run/docker.sock already exists in the container
# Check if a group with the same group id as /var/run/docker.sock already exists in the container
if grep -q "^$DOCKER_GROUP:" /etc/group; then
# If yes, add www-data to that group
echo "Adding internal www-data to group $DOCKER_GROUP"
@ -69,68 +39,25 @@ elif ! sudo -E -u www-data test -r /var/run/docker.sock; then
groupadd -g "$DOCKER_GROUP_ID" docker
usermod -aG docker www-data
fi
if ! sudo -E -u www-data test -r /var/run/docker.sock; then
print_red "Docker socket is not readable by the www-data user. Cannot continue."
if ! sudo -u www-data test -r /var/run/docker.sock; then
echo "Docker socket is not readable by the www-data user. Cannot continue."
exit 1
fi
fi
# Get default docker api version
API_VERSION_FILE="$(find ./ -name DockerActionManager.php | head -1)"
API_VERSION="$(grep -oP 'const string API_VERSION.*\;' "$API_VERSION_FILE" | grep -oP '[0-9]+.[0-9]+' | head -1)"
if [ -z "$API_VERSION" ]; then
print_red "Could not get API_VERSION. Something is wrong!"
# Check if api version is supported
if ! sudo -u www-data docker info &>/dev/null; then
echo "Cannot connect to the docker socket. Cannot proceed."
exit 1
fi
# Check if DOCKER_API_VERSION is set globally
if [ -n "$DOCKER_API_VERSION" ]; then
if ! echo "$DOCKER_API_VERSION" | grep -q '^[0-9].[0-9]\+$'; then
print_red "You've set DOCKER_API_VERSION but not to an allowed value.
The string must be a version number like e.g. '1.44'.
It is set to '$DOCKER_API_VERSION'."
exit 1
fi
print_red "DOCKER_API_VERSION was found to be set to '$DOCKER_API_VERSION'."
print_red "Please note that only v$API_VERSION is officially supported and tested by the maintainers of Nextcloud AIO."
print_red "So you run on your own risk and things might break without warning."
else
# Export docker api version to use it everywhere
export DOCKER_API_VERSION="$API_VERSION"
fi
# Set a fallback docker api version. Needed for api version check.
# The check will not work otherwise on old docker versions
FALLBACK_DOCKER_API_VERSION="1.41"
# Check if docker info can be used
if ! sudo -E -u www-data docker info &>/dev/null; then
if ! sudo -E -u www-data DOCKER_API_VERSION="$FALLBACK_DOCKER_API_VERSION" docker info &>/dev/null; then
print_red "Cannot connect to the docker socket. Cannot proceed."
echo "Did you maybe remove group read permissions for the docker socket? AIO needs them in order to access the docker socket."
echo "If SELinux is enabled on your host, see https://github.com/nextcloud/all-in-one#are-there-known-problems-when-selinux-is-enabled"
echo "If you are on TrueNas SCALE, see https://github.com/nextcloud/all-in-one#can-i-run-aio-on-truenas-scale"
echo "On macOS, see https://github.com/nextcloud/all-in-one#how-to-run-aio-on-macos"
echo "Another possibility might be that Docker api v$API_VERSION is not supported by your docker daemon."
echo "In that case, you should report this to https://github.com/nextcloud/all-in-one/issues"
echo ""
exit 1
fi
fi
# Docker api version check
API_VERSION_FILE="$(find ./ -name DockerActionManager.php | head -1)"
API_VERSION="$(grep -oP 'const API_VERSION.*\;' "$API_VERSION_FILE" | grep -oP '[0-9]+.[0-9]+' | head -1)"
# shellcheck disable=SC2001
API_VERSION_NUMB="$(echo "$DOCKER_API_VERSION" | sed 's/\.//')"
LOCAL_API_VERSION_NUMB="$(sudo -E -u www-data docker version | grep -i "api version" | grep -oP '[0-9]+.[0-9]+' | head -1 | sed 's/\.//')"
if [ -z "$LOCAL_API_VERSION_NUMB" ]; then
LOCAL_API_VERSION_NUMB="$(sudo -E -u www-data DOCKER_API_VERSION="$FALLBACK_DOCKER_API_VERSION" docker version | grep -i "api version" | grep -oP '[0-9]+.[0-9]+' | head -1 | sed 's/\.//')"
fi
API_VERSION_NUMB="$(echo "$API_VERSION" | sed 's/\.//')"
LOCAL_API_VERSION_NUMB="$(sudo -u www-data docker version | grep -i "api version" | grep -oP '[0-9]+.[0-9]+' | head -1 | sed 's/\.//')"
if [ -n "$LOCAL_API_VERSION_NUMB" ] && [ -n "$API_VERSION_NUMB" ]; then
if ! [ "$LOCAL_API_VERSION_NUMB" -ge "$API_VERSION_NUMB" ]; then
print_red "Docker API v$DOCKER_API_VERSION is not supported by your docker engine. Cannot proceed. Please upgrade your docker engine if you want to run Nextcloud AIO!"
echo "Alternatively, set the DOCKER_API_VERSION environmental variable to a compatible version."
echo "However please note that only v$API_VERSION is officially supported and tested by the maintainers of Nextcloud AIO."
echo "See https://github.com/nextcloud/all-in-one#how-to-adjust-the-internally-used-docker-api-version"
echo "Docker API v$API_VERSION is not supported by your docker engine. Cannot proceed. Please upgrade your docker engine if you want to run Nextcloud AIO!"
exit 1
fi
else
@ -138,71 +65,48 @@ else
sleep 10
fi
# Check Storage drivers
STORAGE_DRIVER="$(sudo -E -u www-data docker info | grep "Storage Driver")"
# Check if vfs is used: https://github.com/nextcloud/all-in-one/discussions/1467
if echo "$STORAGE_DRIVER" | grep -q vfs; then
echo "$STORAGE_DRIVER"
print_red "Warning: It seems like the storage driver vfs is used. This will lead to problems with disk space and performance and is disrecommended!"
elif echo "$STORAGE_DRIVER" | grep -q fuse-overlayfs; then
echo "$STORAGE_DRIVER"
print_red "Warning: It seems like the storage driver fuse-overlayfs is used. Please check if you can switch to overlay2 instead."
fi
# Check if snap install
if sudo -E -u www-data docker info | grep "Docker Root Dir" | grep "/var/snap/docker/"; then
print_red "Warning: It looks like your installation uses docker installed via snap."
print_red "This comes with some limitations and is disrecommended by the docker maintainers."
print_red "See for example https://github.com/nextcloud/all-in-one/discussions/4890#discussioncomment-10386752"
fi
# Check if startup command was executed correctly
if ! sudo -E -u www-data docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-mastercontainer$"; then
print_red "It seems like you did not give the mastercontainer the correct name? (The 'nextcloud-aio-mastercontainer' container was not found.)
Using a different name is not supported since mastercontainer updates will not work in that case!
If you are on docker swarm and try to run AIO, see https://github.com/nextcloud/all-in-one#can-i-run-this-with-docker-swarm"
if ! sudo -u www-data docker ps | grep -q "nextcloud-aio-mastercontainer"; then
echo "It seems like you did not give the mastercontainer the correct name?
Using a different name is not supported!"
exit 1
elif ! sudo -E -u www-data docker volume ls --format "{{.Name}}" | grep -q "^nextcloud_aio_mastercontainer$"; then
print_red "It seems like you did not give the mastercontainer volume the correct name? (The 'nextcloud_aio_mastercontainer' volume was not found.)
Using a different name is not supported since the built-in backup solution will not work in that case!"
exit 1
elif ! sudo -E -u www-data docker inspect nextcloud-aio-mastercontainer --format '{{.Mounts}}' | grep -q " nextcloud_aio_mastercontainer "; then
print_red "It seems like you did not attach the 'nextcloud_aio_mastercontainer' volume to the mastercontainer?
This is not supported since the built-in backup solution will not work in that case!"
elif ! sudo -u www-data docker volume ls | grep -q "nextcloud_aio_mastercontainer"; then
echo "It seems like you did not give the mastercontainer volume the correct name?
Using a different name is not supported!"
exit 1
fi
# Check for other options
if [ -n "$NEXTCLOUD_DATADIR" ]; then
if [ "$NEXTCLOUD_DATADIR" = "nextcloud_aio_nextcloud_datadir" ]; then
sleep 1
echo "NEXTCLOUD_DATADIR is set to $NEXTCLOUD_DATADIR"
elif ! echo "$NEXTCLOUD_DATADIR" | grep -q "^/" || [ "$NEXTCLOUD_DATADIR" = "/" ]; then
print_red "You've set NEXTCLOUD_DATADIR but not to an allowed value.
The string must start with '/' and must not be equal to '/'. Also allowed is 'nextcloud_aio_nextcloud_datadir'.
echo "You've set NEXTCLOUD_DATADIR but not to an allowed value.
The string must start with '/' and must not be equal to '/'.
It is set to '$NEXTCLOUD_DATADIR'."
exit 1
fi
fi
if [ -n "$NEXTCLOUD_MOUNT" ]; then
if ! echo "$NEXTCLOUD_MOUNT" | grep -q "^/" || [ "$NEXTCLOUD_MOUNT" = "/" ]; then
print_red "You've set NEXTCLOUD_MOUNT but not to an allowed value.
echo "You've set NEXCLOUD_MOUNT but not to an allowed value.
The string must start with '/' and must not be equal to '/'.
It is set to '$NEXTCLOUD_MOUNT'."
exit 1
elif [ "$NEXTCLOUD_MOUNT" = "/mnt/ncdata" ] || echo "$NEXTCLOUD_MOUNT" | grep -q "^/mnt/ncdata/"; then
print_red "'/mnt/ncdata' and '/mnt/ncdata/' are not allowed as values for NEXTCLOUD_MOUNT."
echo "'/mnt/ncdata' and '/mnt/ncdata/' are not allowed as values for NEXTCLOUD_MOUNT."
exit 1
fi
fi
if [ -n "$NEXTCLOUD_DATADIR" ] && [ -n "$NEXTCLOUD_MOUNT" ]; then
if [ "$NEXTCLOUD_DATADIR" = "$NEXTCLOUD_MOUNT" ]; then
print_red "NEXTCLOUD_DATADIR and NEXTCLOUD_MOUNT are not allowed to be equal."
echo "NEXTCLOUD_DATADIR and NEXTCLOUD_MOUNT are not allowed to be equal."
exit 1
fi
fi
if [ -n "$NEXTCLOUD_UPLOAD_LIMIT" ]; then
if ! echo "$NEXTCLOUD_UPLOAD_LIMIT" | grep -q '^[0-9]\+G$'; then
print_red "You've set NEXTCLOUD_UPLOAD_LIMIT but not to an allowed value.
echo "You've set NEXTCLOUD_UPLOAD_LIMIT but not to an allowed value.
The string must start with a number and end with 'G'.
It is set to '$NEXTCLOUD_UPLOAD_LIMIT'."
exit 1
@ -210,153 +114,64 @@ It is set to '$NEXTCLOUD_UPLOAD_LIMIT'."
fi
if [ -n "$NEXTCLOUD_MAX_TIME" ]; then
if ! echo "$NEXTCLOUD_MAX_TIME" | grep -q '^[0-9]\+$'; then
print_red "You've set NEXTCLOUD_MAX_TIME but not to an allowed value.
echo "You've set NEXTCLOUD_MAX_TIME but not to an allowed value.
The string must be a number. E.g. '3600'.
It is set to '$NEXTCLOUD_MAX_TIME'."
exit 1
fi
fi
if [ -n "$NEXTCLOUD_MEMORY_LIMIT" ]; then
if ! echo "$NEXTCLOUD_MEMORY_LIMIT" | grep -q '^[0-9]\+M$'; then
print_red "You've set NEXTCLOUD_MEMORY_LIMIT but not to an allowed value.
The string must start with a number and end with 'M'.
It is set to '$NEXTCLOUD_MEMORY_LIMIT'."
exit 1
fi
fi
if [ -n "$APACHE_PORT" ]; then
if ! check_if_number "$APACHE_PORT"; then
print_red "You provided an Apache port but did not only use numbers.
echo "You provided an Apache port but did not only use numbers.
It is set to '$APACHE_PORT'."
exit 1
elif ! [ "$APACHE_PORT" -le 65535 ] || ! [ "$APACHE_PORT" -ge 1 ]; then
print_red "The provided Apache port is invalid. It must be between 1 and 65535"
echo "The provided Apache port is invalid. It must be between 1 and 65535"
exit 1
fi
fi
if [ -n "$APACHE_IP_BINDING" ]; then
if ! echo "$APACHE_IP_BINDING" | grep -q '^[0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+$\|^[0-9a-f:]\+$\|^@INTERNAL$'; then
print_red "You provided an ip-address for the apache container's ip-binding but it was not a valid ip-address.
if ! echo "$APACHE_IP_BINDING" | grep -q '^[0-9.]\+$'; then
echo "You provided an ip-address for the apache container's ip-binding but it was not a valid ip-address.
It is set to '$APACHE_IP_BINDING'."
exit 1
fi
fi
if [ -n "$APACHE_ADDITIONAL_NETWORK" ]; then
if ! echo "$APACHE_ADDITIONAL_NETWORK" | grep -q "^[a-zA-Z0-9._-]\+$"; then
print_red "You've set APACHE_ADDITIONAL_NETWORK but not to an allowed value.
It needs to be a string with letters, numbers, hyphens and underscores.
It is set to '$APACHE_ADDITIONAL_NETWORK'."
exit 1
fi
fi
if [ -n "$TALK_PORT" ]; then
if ! check_if_number "$TALK_PORT"; then
print_red "You provided an Talk port but did not only use numbers.
echo "You provided an Talk port but did not only use numbers.
It is set to '$TALK_PORT'."
exit 1
elif ! [ "$TALK_PORT" -le 65535 ] || ! [ "$TALK_PORT" -ge 1 ]; then
print_red "The provided Talk port is invalid. It must be between 1 and 65535"
echo "The provided Talk port is invalid. It must be between 1 and 65535"
exit 1
fi
fi
if [ -n "$APACHE_PORT" ] && [ -n "$TALK_PORT" ]; then
if [ "$APACHE_PORT" = "$TALK_PORT" ]; then
print_red "APACHE_PORT and TALK_PORT are not allowed to be equal."
echo "APACHE_PORT and TALK_PORT are not allowed to be equal."
exit 1
fi
fi
if [ -n "$WATCHTOWER_DOCKER_SOCKET_PATH" ]; then
if ! echo "$WATCHTOWER_DOCKER_SOCKET_PATH" | grep -q "^/" || echo "$WATCHTOWER_DOCKER_SOCKET_PATH" | grep -q "/$"; then
print_red "You've set WATCHTOWER_DOCKER_SOCKET_PATH but not to an allowed value.
if [ -n "$DOCKER_SOCKET_PATH" ]; then
if ! echo "$DOCKER_SOCKET_PATH" | grep -q "^/" || echo "$DOCKER_SOCKET_PATH" | grep -q "/$"; then
echo "You've set DOCKER_SOCKET_PATH but not to an allowed value.
The string must start with '/' and must not end with '/'.
It is set to '$WATCHTOWER_DOCKER_SOCKET_PATH'."
It is set to '$DOCKER_SOCKET_PATH'."
exit 1
fi
fi
if [ -n "$NEXTCLOUD_TRUSTED_CACERTS_DIR" ]; then
if ! echo "$NEXTCLOUD_TRUSTED_CACERTS_DIR" | grep -q "^/" || echo "$NEXTCLOUD_TRUSTED_CACERTS_DIR" | grep -q "/$"; then
print_red "You've set NEXTCLOUD_TRUSTED_CACERTS_DIR but not to an allowed value.
It should be an absolute path to a directory that starts with '/' but not end with '/'.
It is set to '$NEXTCLOUD_TRUSTED_CACERTS_DIR '."
exit 1
fi
fi
if [ -n "$NEXTCLOUD_STARTUP_APPS" ]; then
if ! echo "$NEXTCLOUD_STARTUP_APPS" | grep -q "^[a-z0-9 _-]\+$"; then
print_red "You've set NEXTCLOUD_STARTUP_APPS but not to an allowed value.
It needs to be a string. Allowed are small letters a-z, 0-9, spaces, hyphens and '_'.
It is set to '$NEXTCLOUD_STARTUP_APPS'."
exit 1
fi
fi
if [ -n "$NEXTCLOUD_ADDITIONAL_APKS" ]; then
if ! echo "$NEXTCLOUD_ADDITIONAL_APKS" | grep -q "^[a-z0-9 ._-]\+$"; then
print_red "You've set NEXTCLOUD_ADDITIONAL_APKS but not to an allowed value.
It needs to be a string. Allowed are small letters a-z, digits 0-9, spaces, hyphens, dots and '_'.
It is set to '$NEXTCLOUD_ADDITIONAL_APKS'."
exit 1
fi
fi
if [ -n "$NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS" ]; then
if ! echo "$NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS" | grep -q "^[a-z0-9 ._-]\+$"; then
print_red "You've set NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS but not to an allowed value.
It needs to be a string. Allowed are small letters a-z, digits 0-9, spaces, hyphens, dots and '_'.
It is set to '$NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS'."
exit 1
fi
fi
if [ -n "$AIO_COMMUNITY_CONTAINERS" ]; then
print_red "You've set AIO_COMMUNITY_CONTAINERS but the option was removed.
The community containers get managed via the AIO interface now."
fi
# Check if ghcr.io is reachable
# Solves issues like https://github.com/nextcloud/all-in-one/discussions/5268
if ! curl --no-progress-meter https://ghcr.io/v2/ >/dev/null; then
print_red "Could not reach https://ghcr.io."
echo "Most likely is something blocking access to it."
echo "You should be able to fix this by following https://dockerlabs.collabnix.com/intermediate/networking/Configuring_DNS.html"
echo "Another solution is using https://github.com/nextcloud/all-in-one/tree/main/manual-install"
echo "See https://github.com/nextcloud/all-in-one/blob/main/manual-install/latest.yml"
# Check DNS resolution
# Prevents issues like https://github.com/nextcloud/all-in-one/discussions/565
curl https://nextcloud.com &>/dev/null
if [ "$?" = 6 ]; then
echo "Could not resolve the host nextcloud.com."
echo "Most likely the DNS resolving does not work."
echo "You should be able to fix this by adding the '--dns=\"ip.address.of.dns.server\"' option to the docker run command."
exit 1
fi
# Check that no changes have been made to timezone settings since AIO only supports running in Etc/UTC timezone
if [ -n "$TZ" ]; then
print_red "The environmental variable TZ has been set which is not supported by AIO since it only supports running in the default Etc/UTC timezone!"
echo "The correct timezone can be set in the AIO interface later on!"
# Disable exit since TZ seems to be set by default on Unraid and we don't want to break those instances
# exit 1
fi
# Check that http proxy or no_proxy variable is not set which AIO does not support
if [ -n "$HTTP_PROXY" ] || [ -n "$http_proxy" ] || [ -n "$HTTPS_PROXY" ] || [ -n "$https_proxy" ] || [ -n "$NO_PROXY" ] || [ -n "$no_proxy" ]; then
print_red "The environmental variable HTTP_PROXY, http_proxy, HTTPS_PROXY, https_proxy, NO_PROXY or no_proxy has been set which is not supported by AIO."
echo "If you need this, then you should use https://github.com/nextcloud/all-in-one/tree/main/manual-install"
echo "See https://github.com/nextcloud/all-in-one/blob/main/manual-install/latest.yml"
exit 1
fi
if mountpoint -q /etc/localtime; then
print_red "/etc/localtime has been mounted into the container which is not allowed because AIO only supports running in the default Etc/UTC timezone!"
echo "The correct timezone can be set in the AIO interface later on!"
exit 1
fi
if mountpoint -q /etc/timezone; then
print_red "/etc/timezone has been mounted into the container which is not allowed because AIO only supports running in the default Etc/UTC timezone!"
echo "The correct timezone can be set in the AIO interface later on!"
exit 1
fi
# Check if unsupported env are set (but don't exit as it would break many instances)
if [ -n "$APACHE_DISABLE_REWRITE_IP" ]; then
print_red "The environmental variable APACHE_DISABLE_REWRITE_IP has been set which is not supported by AIO. Please remove it!"
fi
if [ -n "$NEXTCLOUD_TRUSTED_DOMAINS" ]; then
print_red "The environmental variable NEXTCLOUD_TRUSTED_DOMAINS has been set which is not supported by AIO. Please remove it!"
fi
if [ -n "$TRUSTED_PROXIES" ]; then
print_red "The environmental variable TRUSTED_PROXIES has been set which is not supported by AIO. Please remove it!"
fi
# Add important folders
mkdir -p /mnt/docker-aio-config/data/
mkdir -p /mnt/docker-aio-config/session/
@ -373,8 +188,8 @@ chown root:root -R /mnt/docker-aio-config/certs/
# Don't allow access to the AIO interface from the Nextcloud container
# Probably more cosmetic than anything but at least an attempt
if ! grep -q '# nextcloud-aio-block' /etc/apache2/httpd.conf; then
cat << APACHE_CONF >> /etc/apache2/httpd.conf
if ! grep -q '# nextcloud-aio-block' /etc/apache2/apache2.conf; then
cat << APACHE_CONF >> /etc/apache2/apache2.conf
# nextcloud-aio-block-start
<Location />
order allow,deny
@ -401,30 +216,14 @@ if [ -f ./ssl.crt ] && [ -f ./ssl.key ]; then
cp "$GENERATED_CERTS/ssl.key" ./
fi
print_green "Initial startup of Nextcloud All-in-One complete!
print_green "Initial startup of Nextcloud All In One complete!
You should be able to open the Nextcloud AIO Interface now on port 8080 of this server!
E.g. https://internal.ip.of.this.server:8080
⚠️ Important: do always use an ip-address if you access this port and not a domain as HSTS might block access to it later!
If your server has port 80 and 8443 open and you point a domain to your server, you can get a valid certificate automatically by opening the Nextcloud AIO Interface via:
If your server has port 80 and 8443 open and you point a domain to your server, you can get a valid certificate automatically by opening the Nextcloud AIO Interface via:
https://your-domain-that-points-to-this-server.tld:8443"
# Set the timezone to Etc/UTC
export TZ=Etc/UTC
# Set the timezone to UTC
export TZ=UTC
# Fix apache startup
rm -f /var/run/apache2/httpd.pid
# Fix caddy startup
if [ -d "/mnt/docker-aio-config/caddy/locks" ]; then
rm -rf /mnt/docker-aio-config/caddy/locks/*
fi
# Fix the Caddyfile format
caddy fmt --overwrite /Caddyfile
# Fix caddy log
chmod 777 /root
# Start supervisord
exec /usr/bin/supervisord -c /supervisord.conf
exec "$@"

View file

@ -1,36 +1,26 @@
[supervisord]
nodaemon=true
nodaemon=true
logfile=/var/log/supervisord/supervisord.log
pidfile=/var/run/supervisord/supervisord.pid
childlogdir=/var/log/supervisord/
logfile_maxbytes=50MB
logfile_backups=10
loglevel=error
user=root
[program:php-fpm]
# Stdout logging is disabled as otherwise the logs are spammed
stdout_logfile=NONE
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=php-fpm
user=root
[program:apache]
# Stdout logging is disabled as otherwise the logs are spammed
stdout_logfile=NONE
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=httpd -DFOREGROUND
user=root
command=apache2-foreground
[program:caddy]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=/usr/bin/caddy run --config /Caddyfile
user=www-data
command=sudo -u www-data /usr/bin/caddy run -config /Caddyfile
[program:cron]
stdout_logfile=/dev/stdout
@ -38,7 +28,6 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=/cron.sh
user=root
[program:backup-time-file-watcher]
stdout_logfile=/dev/stdout
@ -46,7 +35,6 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=/backup-time-file-watcher.sh
user=root
[program:session-deduplicator]
stdout_logfile=/dev/stdout
@ -54,11 +42,3 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=/session-deduplicator.sh
user=root
[program:domain-validator]
# Logging is disabled as otherwise all attempts will be logged which spams the logs
stdout_logfile=NONE
stderr_logfile=NONE
command=php -S 127.0.0.1:9876 /var/www/docker-aio/php/domain-validator.php
user=www-data

View file

@ -1,95 +1,72 @@
# syntax=docker/dockerfile:latest
FROM php:8.3.30-fpm-alpine3.23
ENV PHP_MEMORY_LIMIT=512M
ENV PHP_UPLOAD_LIMIT=16G
ENV PHP_MAX_TIME=3600
ENV SOURCE_LOCATION=/usr/src/nextcloud
ENV REDIS_DB_INDEX=0
# AIO settings start # Do not remove or change this line!
ENV NEXTCLOUD_VERSION=32.0.5
ENV AIO_TOKEN=123456
ENV AIO_URL=localhost
# AIO settings end # Do not remove or change this line!
COPY --chmod=775 Containers/nextcloud/*.sh /
COPY --chmod=774 Containers/nextcloud/upgrade.exclude /upgrade.exclude
COPY Containers/nextcloud/config/*.php /
COPY Containers/nextcloud/supervisord.conf /supervisord.conf
# AIO cloning start # Do not remove or change this line!
COPY app /usr/src/nextcloud/apps/nextcloud-aio
COPY Containers/nextcloud/root.motd /root.motd
# AIO cloning end # Do not remove or change this line!
VOLUME /mnt/ncdata
VOLUME /var/www/html
# From https://github.com/nextcloud/docker/blob/master/23/fpm-alpine/Dockerfile
FROM php:8.0.22-fpm-alpine3.16
# Custom: change id of www-data user as it needs to be the same like on old installations
# hadolint ignore=SC2086,DL3003
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache shadow; \
deluser www-data; \
groupmod -g 333 xfs; \
usermod -u 333 -g 333 xfs; \
addgroup -g 33 -S www-data; \
adduser -u 33 -D -S -G www-data www-data; \
\
adduser -u 33 -D -S -G www-data www-data
# entrypoint.sh and cron.sh dependencies
RUN set -ex; \
\
apk add --no-cache \
rsync \
; \
;
# install the PHP extensions we need
# see https://docs.nextcloud.com/server/stable/admin_manual/installation/source_installation.html
ENV PHP_MEMORY_LIMIT 512M
ENV PHP_UPLOAD_LIMIT 10G
ENV PHP_MAX_TIME 3600
RUN set -ex; \
\
apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \
autoconf \
freetype-dev \
gmp-dev \
icu-dev \
imagemagick-dev \
imagemagick-svg \
imagemagick-heic \
imagemagick-tiff \
libevent-dev \
libjpeg-turbo-dev \
libmcrypt-dev \
libmemcached-dev \
libpng-dev \
libwebp-dev \
libmemcached-dev \
libxml2-dev \
libzip-dev \
openldap-dev \
pcre-dev \
postgresql-dev \
imagemagick-dev \
libwebp-dev \
gmp-dev \
; \
\
docker-php-ext-configure gd --with-freetype --with-jpeg --with-webp; \
docker-php-ext-configure ftp --with-openssl-dir=/usr; \
docker-php-ext-configure ldap; \
docker-php-ext-install -j "$(nproc)" \
bcmath \
exif \
gd \
gmp \
intl \
ldap \
opcache \
pcntl \
pdo_mysql \
pdo_pgsql \
sysvsem \
zip \
gmp \
; \
\
# pecl will claim success even if one install fails, so we need to perform each install separately
pecl install -o igbinary-3.2.16; \
pecl install APCu-5.1.28; \
pecl install -D 'enable-memcached-igbinary="yes"' memcached-3.4.0; \
pecl install -oD 'enable-redis-igbinary="yes" enable-redis-zstd="yes" enable-redis-lz4="yes"' redis-6.3.0; \
pecl install -o imagick-3.8.1; \
pecl install APCu-5.1.21; \
pecl install memcached-3.2.0; \
pecl install redis-5.3.7; \
pecl install imagick-3.7.0; \
\
docker-php-ext-enable \
igbinary \
apcu \
memcached \
redis \
@ -103,61 +80,43 @@ RUN set -ex; \
| sort -u \
| awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
)"; \
apk add --no-cache --virtual .nextcloud-phpext-rundeps $runDeps; \
apk del .build-deps; \
\
{ \
echo 'apc.serializer=igbinary'; \
echo 'session.serialize_handler=igbinary'; \
} >> /usr/local/etc/php/conf.d/docker-php-ext-igbinary.ini; \
\
apk add --virtual .nextcloud-phpext-rundeps $runDeps; \
apk del .build-deps
# set recommended PHP.ini settings
# see https://docs.nextcloud.com/server/stable/admin_manual/installation/server_tuning.html#enable-php-opcache and below
{ \
echo 'opcache.max_accelerated_files=10000'; \
echo 'opcache.memory_consumption=256'; \
echo 'opcache.interned_strings_buffer=64'; \
# see https://docs.nextcloud.com/server/stable/admin_manual/configuration_server/server_tuning.html#enable-php-opcache
RUN { \
echo 'opcache.interned_strings_buffer=32'; \
echo 'opcache.save_comments=1'; \
echo 'opcache.revalidate_freq=60'; \
echo 'opcache.jit=1255'; \
echo 'opcache.jit_buffer_size=8M'; \
} > /usr/local/etc/php/conf.d/opcache-recommended.ini; \
\
{ \
echo 'apc.enable_cli=1'; \
echo 'apc.shm_size=64M'; \
} >> /usr/local/etc/php/conf.d/docker-php-ext-apcu.ini; \
echo 'apc.enable_cli=1' >> /usr/local/etc/php/conf.d/docker-php-ext-apcu.ini; \
\
{ \
echo 'memory_limit=${PHP_MEMORY_LIMIT}'; \
echo 'upload_max_filesize=${PHP_UPLOAD_LIMIT}'; \
echo 'post_max_size=${PHP_UPLOAD_LIMIT}'; \
echo 'max_execution_time=${PHP_MAX_TIME}'; \
echo 'max_input_time=-1'; \
echo 'default_socket_timeout=${PHP_MAX_TIME}'; \
echo 'max_input_time=${PHP_MAX_TIME}'; \
} > /usr/local/etc/php/conf.d/nextcloud.ini; \
\
{ \
echo 'session.save_handler = redis'; \
echo 'session.save_path = "tcp://${REDIS_HOST}:${REDIS_PORT}?database=${REDIS_DB_INDEX}${REDIS_USER_AUTH}&auth[]=${REDIS_HOST_PASSWORD}"'; \
echo 'redis.session.locking_enabled = 1'; \
echo 'redis.session.lock_retries = -1'; \
echo 'redis.session.lock_wait_time = 10000'; \
echo 'session.gc_maxlifetime = 86400'; \
} > /usr/local/etc/php/conf.d/redis-session.ini; \
\
mkdir -p /var/www/data; \
mkdir /var/www/data; \
chown -R www-data:root /var/www; \
chmod -R g=u /var/www; \
\
# Download Nextcloud archive start # Do not remove or change this line!
chmod -R g=u /var/www
VOLUME /var/www/html
ENV NEXTCLOUD_VERSION 24.0.4
RUN set -ex; \
apk add --no-cache --virtual .fetch-deps \
bzip2 \
gnupg \
; \
\
curl -fsSL -o nextcloud.tar.bz2 \
"https://github.com/nextcloud-releases/server/releases/download/v${NEXTCLOUD_VERSION}/nextcloud-${NEXTCLOUD_VERSION}.tar.bz2"; \
"https://download.nextcloud.com/server/releases/nextcloud-${NEXTCLOUD_VERSION}.tar.bz2"; \
curl -fsSL -o nextcloud.tar.bz2.asc \
"https://download.nextcloud.com/server/releases/nextcloud-${NEXTCLOUD_VERSION}.tar.bz2.asc"; \
export GNUPGHOME="$(mktemp -d)"; \
@ -167,22 +126,32 @@ RUN set -ex; \
tar -xjf nextcloud.tar.bz2 -C /usr/src/; \
gpgconf --kill all; \
rm nextcloud.tar.bz2.asc nextcloud.tar.bz2; \
rm -rf "$GNUPGHOME" /usr/src/nextcloud/updater; \
mkdir -p /usr/src/nextcloud/data; \
mkdir -p /usr/src/nextcloud/custom_apps; \
chmod +x /usr/src/nextcloud/occ; \
mkdir -p /usr/src/nextcloud/config; \
apk del .fetch-deps; \
# Download Nextcloud archive end # Do not remove or change this line!
mv /*.php /usr/src/nextcloud/config/; \
\
apk del .fetch-deps
COPY *.sh upgrade.exclude /
COPY config/* /usr/src/nextcloud/config/
ENTRYPOINT ["/entrypoint.sh"]
CMD ["php-fpm"]
# Template from https://github.com/nextcloud/docker/blob/master/.examples/dockerfiles/full/fpm-alpine/Dockerfile
RUN set -ex; \
\
apk add --no-cache \
ffmpeg \
imagemagick \
procps \
samba-client \
supervisor \
# libreoffice \
; \
;
RUN set -ex; \
\
apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \
@ -191,15 +160,12 @@ RUN set -ex; \
openssl-dev \
samba-dev \
bzip2-dev \
libpq-dev \
; \
\
docker-php-ext-configure imap --with-kerberos --with-imap-ssl; \
docker-php-ext-install \
bz2 \
imap \
pgsql \
ftp \
; \
pecl install smbclient; \
docker-php-ext-enable smbclient; \
@ -210,15 +176,22 @@ RUN set -ex; \
| sort -u \
| awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
)"; \
apk add --no-cache --virtual .nextcloud-phpext-rundeps $runDeps; \
apk del .build-deps; \
\
mkdir -p \
apk add --virtual .nextcloud-phpext-rundeps $runDeps; \
apk del .build-deps
RUN mkdir -p \
/var/log/supervisord \
/var/run/supervisord \
; \
chmod 777 -R /var/log/supervisord; \
chmod 777 -R /var/run/supervisord; \
;
COPY supervisord.conf /
ENV NEXTCLOUD_UPDATE=1
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]
# Custom:
RUN set -ex; \
\
apk add --no-cache \
bash \
@ -228,41 +201,55 @@ RUN set -ex; \
git \
postgresql-client \
tzdata \
sudo \
grep \
nodejs \
bind-tools \
imagemagick \
imagemagick-svg \
imagemagick-heic \
imagemagick-tiff \
coreutils; \
\
mawk \
; \
rm -rf /var/lib/apt/lists/*
RUN set -ex; \
grep -q '^pm = dynamic' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's/^pm = dynamic/pm = ondemand/' /usr/local/etc/php-fpm.d/www.conf; \
# Sync this with max db connections and MaxRequestWorkers
# We don't actually expect so many children but don't want to limit it artificially because people will report issues otherwise.
# Also children will usually be terminated again after the process is done due to the ondemand setting
sed -i 's/^pm.max_children =.*/pm.max_children = 5000/' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's|access.log = /proc/self/fd/2|access.log = /proc/self/fd/1|' /usr/local/etc/php-fpm.d/docker.conf; \
\
echo "[ -n \"\$TERM\" ] && [ -f /root.motd ] && cat /root.motd" >> /root/.bashrc; \
\
sed -i 's/^pm.max_children =.*/pm.max_children = 80/' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's/^pm.start_servers =.*/pm.start_servers = 2/' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's/^pm.min_spare_servers =.*/pm.min_spare_servers = 1/' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's/^pm.max_spare_servers =.*/pm.max_spare_servers = 3/' /usr/local/etc/php-fpm.d/www.conf
RUN set -ex; \
rm -rf /tmp/nextcloud-aio && \
mkdir -p /tmp/nextcloud-aio && \
cd /tmp/nextcloud-aio && \
git clone https://github.com/nextcloud-releases/all-in-one.git --depth 1 .; \
mkdir -p /usr/src/nextcloud/apps/nextcloud-aio; \
cp -r ./app/* /usr/src/nextcloud/apps/nextcloud-aio/
RUN set -ex; \
chown www-data:root -R /usr/src && \
chmod 777 -R /usr/local/etc/php/conf.d && \
chmod 777 -R /usr/local/etc/php-fpm.d && \
chmod -R 777 /tmp; \
chmod -R 777 /etc/openldap; \
\
mkdir -p /nc-updater; \
chmod -R 777 /nc-updater
chown www-data:root -R /usr/local/etc/php/conf.d && \
chown www-data:root -R /usr/local/etc/php-fpm.d && \
chown www-data:root -R /var/log/supervisord/ && \
chown www-data:root -R /var/run/supervisord/ && \
rm -r /usr/src/nextcloud/apps/updatenotification
# hadolint ignore=DL3002
USER root
COPY start.sh /
COPY notify.sh /
RUN set -ex; \
chmod +x /start.sh && \
chmod +r /supervisord.conf && \
chmod +x /entrypoint.sh && \
chmod +r /upgrade.exclude && \
chmod +x /cron.sh && \
chmod +x /notify.sh && \
chmod +x /activate-collabora.sh
RUN set -ex; \
mkdir /mnt/ncdata; \
chown www-data:www-data /mnt/ncdata;
VOLUME /mnt/ncdata
# Give root a random password
RUN echo "root:$(openssl rand -base64 12)" | chpasswd
USER www-data
ENTRYPOINT ["/start.sh"]
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]
HEALTHCHECK CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
HEALTHCHECK CMD (nc -z localhost 9000 && curl -skI localhost:7867) || exit 1

View file

@ -1,35 +0,0 @@
# Nextcloud All-in-One ``nextcloud`` Container
This folder contains the OCI/Docker container definition, along with associated resources and configuration files, for building the `nextcloud` container as part of the [Nextcloud All-in-One](https://github.com/nextcloud/all-in-one) project. This container hosts PHP and the Nextcloud Server application.
## Overview
The Nextcloud container provides the core Nextcloud application environment, including the necessary dependencies and configuration for seamless integration into the All-in-One stack. The container hosts:
- The PHP SAPI/backend (php-fpm)
- Nextcloud background jobs and scheduled tasks, which are handled via cron
- Miscellaneous minor support services specific to AIO's Nextcloud deployment (health and exec)
## Contents
- **Dockerfile**: Instructions for building the Nextcloud container image.
- **Entrypoint script**: The `start.sh` script is used for container initialization and runtime configuration before starting supervisord.
- **Nextcloud configuration files**: Specific to running in a containerized setting and/or within AIO.
- **Supervisor**: The `supervisord.conf` file defines the long-running services hosted within the container (php-fpm, cron, etc.).
## Usage
This container is intended to be used as part of the All-in-One deployment and is not meant to be used on its own. Among other requirements, it needs a web server container (which AIO provides in a dedicated Apache container). It is designed to be orchestrated by the [All-in-One mastercontainer](https://github.com/nextcloud/all-in-one/tree/main/Containers/mastercontainer) or used within an [AIO Manual Installation](https://github.com/nextcloud/all-in-one/tree/main/manual-install) or [AIO Helm chart](https://github.com/nextcloud/all-in-one/tree/main/nextcloud-aio-helm-chart).
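Once the stack is up, routine interaction with this container usually goes through Nextcloud's `occ` tool via `docker exec`. The sketch below assumes the default AIO container name `nextcloud-aio-nextcloud` and the standard install path under `/var/www/html`; adapt as needed:
```bash
# Run an occ command inside the running Nextcloud container as the web server user.
sudo docker exec --user www-data nextcloud-aio-nextcloud php /var/www/html/occ status

# Follow the container logs while debugging.
sudo docker logs -f nextcloud-aio-nextcloud
```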
## Documentation
- [Nextcloud All-in-One Documentation](https://github.com/nextcloud/all-in-one#readme)
- [Nextcloud Documentation](https://docs.nextcloud.com/)
## Contributing
Contributions are welcome! Please follow the Nextcloud project's guidelines and submit pull requests or issues via the main repository.
## License
This folder and its contents are licensed under the [GNU AGPLv3](https://www.gnu.org/licenses/agpl-3.0.html), in line with the rest of Nextcloud All-in-One.

View file

@ -0,0 +1,13 @@
#!/bin/bash
if [ "$COLLABORA_ENABLED" != yes ]; then
# Basically sleep forever if collabora is not enabled
sleep inf
fi
while ! nc -z "$NC_DOMAIN" 443; do
sleep 5
done
sleep 10
echo "Activating collabora config..."
php /var/www/html/occ richdocuments:activate-config
sleep inf

View file

@ -1,5 +0,0 @@
<?php
$CONFIG = array (
'one-click-instance' => true,
'one-click-instance.user-limit' => 100,
);

View file

@ -2,20 +2,14 @@
$CONFIG = array (
'apps_paths' => array (
0 => array (
'path' => '/var/www/html/apps',
'path' => OC::$SERVERROOT.'/apps',
'url' => '/apps',
'writable' => false,
),
1 => array (
'path' => '/var/www/html/custom_apps',
'path' => OC::$SERVERROOT.'/custom_apps',
'url' => '/custom_apps',
'writable' => true,
),
),
);
if (getenv('APPS_ALLOWLIST')) {
$CONFIG['appsallowlist'] = explode(" ", getenv('APPS_ALLOWLIST'));
}
if (getenv('NEXTCLOUD_APP_STORE_URL')) {
$CONFIG['appstoreurl'] = getenv('NEXTCLOUD_APP_STORE_URL');
}

View file

@ -1,5 +0,0 @@
<?php
// Check if NEXTCLOUD_TRUSTED_CERTIFICATES_ are configured
if (str_contains(implode(' ', array_keys(getenv())), 'NEXTCLOUD_TRUSTED_CERTIFICATES_')) {
$CONFIG['default_certificates_bundle_path'] = '/var/www/html/data/certificates/ca-bundle.crt';
}

View file

@ -1,17 +0,0 @@
<?php
if (getenv('NEXTCLOUD_TRUSTED_CERTIFICATES_POSTGRES')) {
$CONFIG = array(
'pgsql_ssl' => array(
'mode' => 'verify-ca',
'rootcert' => '/var/www/html/data/certificates/ca-bundle.crt',
),
);
}
if (getenv('NEXTCLOUD_TRUSTED_CERTIFICATES_MYSQL')) {
$CONFIG = array(
'dbdriveroptions' => array(
PDO::MYSQL_ATTR_SSL_CA => '/var/www/html/data/certificates/ca-bundle.crt',
),
);
}

View file

@ -1,13 +0,0 @@
<?php
if (getenv('HTTP_PROXY')) {
$CONFIG['proxy'] = getenv('HTTP_PROXY');
}
if (getenv('HTTPS_PROXY')) {
$CONFIG['proxy'] = getenv('HTTPS_PROXY');
}
if (getenv('PROXY_USER_PASSWORD')) {
$CONFIG['proxyuserpwd'] = getenv('PROXY_USER_PASSWORD');
}
if (getenv('NO_PROXY')) {
$CONFIG['proxyexclude'] = explode(',', getenv('NO_PROXY'));
}

View file

@ -9,15 +9,9 @@ if (getenv('REDIS_HOST')) {
),
);
if (getenv('REDIS_PORT')) {
$CONFIG['redis']['port'] = (int) getenv('REDIS_PORT');
}
if (getenv('REDIS_DB_INDEX')) {
$CONFIG['redis']['dbindex'] = (int) getenv('REDIS_DB_INDEX');
}
if (getenv('REDIS_USER_AUTH') !== false) {
$CONFIG['redis']['user'] = str_replace("&auth[]=", "", getenv('REDIS_USER_AUTH'));
if (getenv('REDIS_HOST_PORT') !== false) {
$CONFIG['redis']['port'] = (int) getenv('REDIS_HOST_PORT');
} elseif (getenv('REDIS_HOST')[0] != '/') {
$CONFIG['redis']['port'] = 6379;
}
}

View file

@ -4,34 +4,24 @@ if (getenv('OBJECTSTORE_S3_BUCKET')) {
$use_path = getenv('OBJECTSTORE_S3_USEPATH_STYLE');
$use_legacyauth = getenv('OBJECTSTORE_S3_LEGACYAUTH');
$autocreate = getenv('OBJECTSTORE_S3_AUTOCREATE');
$multibucket = getenv('OBJECTSTORE_S3_MULTIBUCKET');
$CONFIG = array(
'objectstore' => array(
'class' => '\OC\Files\ObjectStore\S3',
'arguments' => array(
'multibucket' => $multibucket === 'true',
'num_buckets' => (int)getenv('OBJECTSTORE_S3_NUM_BUCKETS') ?: 64,
'bucket' => getenv('OBJECTSTORE_S3_BUCKET'),
'key' => getenv('OBJECTSTORE_S3_KEY') ?: '',
'secret' => getenv('OBJECTSTORE_S3_SECRET') ?: '',
'region' => getenv('OBJECTSTORE_S3_REGION') ?: '',
'hostname' => getenv('OBJECTSTORE_S3_HOST') ?: '',
'port' => getenv('OBJECTSTORE_S3_PORT') ?: '',
'storageClass' => getenv('OBJECTSTORE_S3_STORAGE_CLASS') ?: '',
'objectPrefix' => getenv("OBJECTSTORE_S3_OBJECT_PREFIX") ? getenv("OBJECTSTORE_S3_OBJECT_PREFIX") : "urn:oid:",
'autocreate' => strtolower($autocreate) !== 'false',
'use_ssl' => strtolower($use_ssl) !== 'false',
'autocreate' => (strtolower($autocreate) === 'false' || $autocreate == false) ? false : true,
'use_ssl' => (strtolower($use_ssl) === 'false' || $use_ssl == false) ? false : true,
// required for some non Amazon S3 implementations
'use_path_style' => strtolower($use_path) === 'true',
'use_path_style' => $use_path == true && strtolower($use_path) !== 'false',
// required for older protocol versions
'legacy_auth' => strtolower($use_legacyauth) === 'true',
'use_nextcloud_bundle' => 1,
'legacy_auth' => $use_legacyauth == true && strtolower($use_legacyauth) !== 'false'
)
)
);
$sse_c_key = getenv('OBJECTSTORE_S3_SSE_C_KEY');
if ($sse_c_key) {
$CONFIG['objectstore']['arguments']['sse_c_key'] = $sse_c_key;
}
}
}

View file

@ -1,31 +0,0 @@
<?php
if (getenv('SMTP_HOST') && getenv('MAIL_FROM_ADDRESS') && getenv('MAIL_DOMAIN')) {
$CONFIG = array (
'mail_smtpmode' => 'smtp',
'mail_smtphost' => getenv('SMTP_HOST'),
'mail_smtpport' => getenv('SMTP_PORT') ?: (getenv('SMTP_SECURE') ? 465 : 25),
'mail_smtpsecure' => getenv('SMTP_SECURE') ?: '',
'mail_smtpauth' => getenv('SMTP_NAME') && getenv('SMTP_PASSWORD'),
'mail_smtpauthtype' => getenv('SMTP_AUTHTYPE') ?: 'LOGIN',
'mail_smtpname' => getenv('SMTP_NAME') ?: '',
'mail_from_address' => getenv('MAIL_FROM_ADDRESS'),
'mail_domain' => getenv('MAIL_DOMAIN'),
);
if (getenv('SMTP_PASSWORD')) {
$CONFIG['mail_smtppassword'] = getenv('SMTP_PASSWORD');
} else {
$CONFIG['mail_smtppassword'] = '';
}
}
if (getenv('NEXTCLOUD_TRUSTED_CERTIFICATES_MAILER')) {
$CONFIG = array(
'mail_smtpstreamoptions' => array(
'ssl' => array(
'verify_peer_name' => false,
'cafile' => '/var/www/html/data/certificates/ca-bundle.crt',
)
)
);
}

View file

@ -1,18 +1,7 @@
#!/bin/bash
wait_for_cron() {
set -x
while [ -n "$(pgrep -f /var/www/html/cron.php)" ]; do
echo "Waiting for cron to stop..."
sleep 5
done
echo "Cronjob successfully exited."
exit
}
trap wait_for_cron SIGINT SIGTERM
set -eu
while true; do
php -f /var/www/html/cron.php &
sleep 5m &
wait $!
sleep 5m
done

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff