Compare commits


No commits in common. "main" and "v2.0.3" have entirely different histories.
main...v2.0.3

352 changed files with 5202 additions and 22059 deletions

.gitattributes

@@ -1 +0,0 @@
* text=auto


@@ -1,16 +1,15 @@
---
-name: 🐛 Bug report - no questions and no support!
-about: Help us improving by reporting a bug - this category is not for questions and also not for support! Please use one of the options below for questions and support
-labels: 0. Needs triage
+name: 🐛 Bug report
+about: Help us improving by reporting a bug
+labels: bug, 0. Needs triage
---
-<!---
-- Before submitting a bug report, please read through the documentation available at https://github.com/nextcloud/all-in-one#faq
-- Additional documentation is available here: https://github.com/nextcloud/all-in-one/discussions/categories/wiki
-- You should also read through existing questions and their answer here: https://github.com/nextcloud/all-in-one/discussions/categories/questions
-- Additional threads can be found here: https://help.nextcloud.com/tag/aio
-- Existing feature requests are listed here: https://github.com/nextcloud/all-in-one/discussions/categories/ideas
--->
+<!--- Please keep this note for other contributors -->
+### How to use GitHub
+* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
+* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
+* Subscribe to receive notifications on status change and new comments.
<!--- Please fill out the whole template below -->
### Steps to reproduce
@@ -23,17 +22,11 @@ labels: 0. Needs triage
### Actual behavior <!--- Tell us what happens instead -->
-### Other information
-#### Host OS <!--- (the host OS on which you are trying to install AIO on) -->
-#### Output of `sudo docker info`
-#### Docker run command or docker-compose file that you used
-#### Output of `sudo docker logs nextcloud-aio-mastercontainer`
-#### Output of `sudo docker inspect nextcloud-aio-mastercontainer`
-#### Output of `sudo docker ps -a`
-#### Other valuable info <!--- (like additional logs, screenshots & Co.) -->
+### Host OS <!--- (the host OS on which you are trying to install AIO on) -->
+#### Nextcloud AIO version <!--- (see Nextcloud AIO interface) -->
+#### Current channel <!--- (see the channel name in the AIO interface) -->
+#### Other valuable info <!--- (like logs, screenshots & Co.) -->


@@ -1,9 +1,15 @@
---
name: 📖 Existing feature/documentation enhancement
about: Suggest an enhancement of an existing feature/documentation - for other types, please use the feature request option below
-labels: 0. Needs triage
+labels: enhancement, 0. Needs triage
---
+<!--- Please keep this note for other contributors -->
+### How to use GitHub
+* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are interested into the same feature.
+* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
+* Subscribe to receive notifications on status change and new comments.
<!--- Please fill out the whole template below -->
### Is your feature request related to a problem? Please describe.
<!--- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->


@@ -1,14 +1,14 @@
blank_issues_enabled: false
contact_links:
-- name: 📘 Documentation on Nextcloud AIO
-url: https://github.com/nextcloud/all-in-one#faq
-about: Please read the docs first before submitting any report or request!
-- name: ⛑️ Questions and support
-url: https://help.nextcloud.com/tag/aio
-about: For questions, support and help
- name: 💡 Suggest a new feature or discuss one
url: https://github.com/nextcloud/all-in-one/discussions/categories/ideas
about: For new feature requests and discussion of existing ones
+- name: ❓ Questions on AIO
+url: https://github.com/nextcloud/all-in-one/discussions/categories/questions
+about: For questions regarding AIO
+- name: ⛑️ Community Support and Help
+url: https://help.nextcloud.com/tag/aio
+about: For other types of questions
- name: 💼 Nextcloud Enterprise
url: https://portal.nextcloud.com/
about: If you are a Nextcloud Enterprise customer, or need Professional support, so it can be resolved directly by our dedicated engineers more quickly

.github/dependabot.yml

@@ -1,62 +1,158 @@
version: 2 version: 2
updates: updates:
- package-ecosystem: "github-actions" - package-ecosystem: "github-actions"
directory: ".github/workflows" directory: "/"
schedule: schedule:
interval: "daily" interval: "daily"
time: "12:00" time: "12:00"
open-pull-requests-limit: 10 open-pull-requests-limit: 10
rebase-strategy: "disabled"
labels:
- 3. to review
- dependencies
cooldown:
default-days: 7
- package-ecosystem: composer - package-ecosystem: composer
directory: "/php/" directory: "/php/"
schedule: schedule:
interval: "daily" interval: "daily"
time: "12:00" time: "12:00"
open-pull-requests-limit: 10 open-pull-requests-limit: 10
rebase-strategy: "auto"
labels: labels:
- 3. to review - 3. to review
- dependencies - dependencies
- package-ecosystem: "docker" - package-ecosystem: "docker"
directories: directory: "/Containers/apache"
- "/Containers/alpine"
- "/Containers/apache"
- "/Containers/borgbackup"
- "/Containers/clamav"
- "/Containers/collabora"
- "/Containers/docker-socket-proxy"
- "/Containers/domaincheck"
- "/Containers/fulltextsearch"
- "/Containers/imaginary"
- "/Containers/mastercontainer"
- "/Containers/nextcloud"
- "/Containers/notify-push"
- "/Containers/onlyoffice"
- "/Containers/postgresql"
- "/Containers/redis"
- "/Containers/talk"
- "/Containers/talk-recording"
- "/Containers/watchtower"
- "/Containers/whiteboard"
schedule: schedule:
interval: "daily" interval: "daily"
time: "04:00" time: "12:00"
open-pull-requests-limit: 10 open-pull-requests-limit: 10
rebase-strategy: "disabled"
labels: labels:
- 3. to review - 3. to review
- dependencies - dependencies
- package-ecosystem: "docker"
directory: "/Containers/borgbackup"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/collabora"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/domaincheck"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/mastercontainer"
schedule:
interval: "daily"
time: "12:00"
ignore: ignore:
- dependency-name: "php" - dependency-name: "php"
update-types: ["version-update:semver-major", "version-update:semver-minor"] update-types: ["version-update:semver-major", "version-update:semver-minor"]
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/nextcloud"
schedule:
interval: "daily"
time: "12:00"
ignore:
- dependency-name: "php"
update-types: ["version-update:semver-major", "version-update:semver-minor"]
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/postgresql"
schedule:
interval: "daily"
time: "12:00"
ignore:
- dependency-name: "postgres" - dependency-name: "postgres"
update-types: ["version-update:semver-major"] update-types: ["version-update:semver-major"]
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/redis"
schedule:
interval: "daily"
time: "12:00"
ignore:
- dependency-name: "redis" - dependency-name: "redis"
update-types: ["version-update:semver-major"] update-types: ["version-update:semver-major"]
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/talk"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/watchtower"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/clamav"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/onlyoffice"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/imaginary"
schedule:
interval: "daily"
time: "12:00"
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies
- package-ecosystem: "docker"
directory: "/Containers/fulltextsearch"
schedule:
interval: "daily"
time: "12:00"
ignore:
- dependency-name: "elasticsearch" - dependency-name: "elasticsearch"
update-types: ["version-update:semver-major"] update-types: ["version-update:semver-major"]
open-pull-requests-limit: 10
labels:
- 3. to review
- dependencies


@@ -1,5 +0,0 @@
<!--
- 🚨 SECURITY INFO
-
- Before sending a pull request that fixes a security issue please report it via our HackerOne page (https://hackerone.com/nextcloud) following our security policy (https://nextcloud.com/security/). This allows us to coordinate the fix and release without potentially exposing all Nextcloud servers and users in the meantime.
-->

.github/release.yml

@@ -1,14 +0,0 @@
changelog:
categories:
- title: 🏕 New features and other improvements
labels:
- enhancement
- title: 🐞 Fixed bugs
labels:
- bug
- title: 👒 Updated dependencies
labels:
- dependencies
- title: 📄 Improved documentation
labels:
- documentation


@@ -1,20 +0,0 @@
name: 'Codespell'
on:
pull_request:
push:
branches:
- main
jobs:
codespell:
name: Check spelling
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Check spelling
uses: codespell-project/actions-codespell@8f01853be192eb0f849a5c7d721450e7a467c579 # v2
with:
check_filenames: true
check_hidden: true


@@ -1,29 +0,0 @@
name: collabora-update
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
jobs:
collabora-update:
name: update collabora
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Run collabora-profile-update
run: |
rm -f php/cool-seccomp-profile.json
wget https://raw.githubusercontent.com/CollaboraOnline/online/refs/heads/main/docker/cool-seccomp-profile.json
mv cool-seccomp-profile.json php/
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
with:
commit-message: collabora-seccomp-update automated change
signoff: true
title: collabora seccomp update
body: Automated collabora seccomp profile update
labels: dependencies, 3. to review
milestone: next
branch: collabora-seccomp-update

.github/workflows/command-rebase.yml

@@ -0,0 +1,51 @@
# This workflow is provided via the organization template repository
#
# https://github.com/nextcloud/.github
# https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization
name: Rebase command
on:
issue_comment:
types: created
permissions:
contents: read
jobs:
rebase:
runs-on: ubuntu-latest
permissions:
contents: none
# On pull requests and if the comment starts with `/rebase`
if: github.event.issue.pull_request != '' && startsWith(github.event.comment.body, '/rebase')
steps:
- name: Add reaction on start
uses: peter-evans/create-or-update-comment@v2
with:
token: ${{ secrets.COMMAND_BOT_PAT }}
repository: ${{ github.event.repository.full_name }}
comment-id: ${{ github.event.comment.id }}
reaction-type: "+1"
- name: Checkout the latest code
uses: actions/checkout@v3
with:
fetch-depth: 0
token: ${{ secrets.COMMAND_BOT_PAT }}
- name: Automatic Rebase
uses: cirrus-actions/rebase@1.7
env:
GITHUB_TOKEN: ${{ secrets.COMMAND_BOT_PAT }}
- name: Add reaction on failure
uses: peter-evans/create-or-update-comment@v2
if: failure()
with:
token: ${{ secrets.COMMAND_BOT_PAT }}
repository: ${{ github.event.repository.full_name }}
comment-id: ${{ github.event.comment.id }}
reaction-type: "-1"


@@ -1,37 +0,0 @@
name: Validate community containers
on:
pull_request:
paths:
- 'community-containers/**'
push:
branches:
- main
paths:
- 'community-containers/**'
jobs:
validator-community-containers:
name: Validate community containers
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Validate structure
run: |
CONTAINERS="$(find ./community-containers -mindepth 1 -maxdepth 1 -type d)"
mapfile -t CONTAINERS <<< "$CONTAINERS"
for container in "${CONTAINERS[@]}"; do
container="$(echo "$container" | sed 's|./community-containers/||')"
if ! [ -f ./community-containers/"$container"/"$container.json" ]; then
echo ".json file must be named like its parent folder $container"
FAIL=1
fi
if ! [ -f ./community-containers/"$container"/readme.md ]; then
echo "There must be a readme.md file in the folder!"
FAIL=1
fi
if [ -n "$FAIL" ]; then
exit 1
fi
done


@@ -0,0 +1,54 @@
name: Create Psalm Container
on:
workflow_dispatch:
schedule:
- cron: '5 4 * * *'
jobs:
push_to_registry:
runs-on: ubuntu-latest
name: Create Psalm Container
permissions:
packages: write
contents: read
steps:
- name: Check out the repo
run: |
git clone https://github.com/psalm/psalm-github-actions.git
- name: Modify the Dockerfile
run: |
set -x
sed -i 's|FROM php:7.4-alpine|FROM php:8.0-alpine|' "psalm-github-actions/Dockerfile"
cat << APCU >> "psalm-github-actions/Dockerfile"
RUN mkdir -p /usr/src/php/ext/apcu && \
curl -fsSL https://pecl.php.net/get/apcu | tar xvz -C "/usr/src/php/ext/apcu" --strip 1 && \
docker-php-ext-install apcu
APCU
- name: Log in to GitHub Docker Registry
uses: docker/login-action@v2
with:
registry: docker.pkg.github.com
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Log in to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build container image
uses: docker/build-push-action@v3
with:
push: true
context: 'psalm-github-actions'
file: 'psalm-github-actions/Dockerfile'
tags: |
ghcr.io/nextcloud/all-in-one-psalm:latest


@@ -1,7 +1,6 @@
name: dependency-updates
on:
-workflow_dispatch:
schedule:
- cron: '00 12 * * *'
@@ -10,27 +9,28 @@ jobs:
name: Run dependency update script
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
-- uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2
+- uses: actions/checkout@v3
+- uses: nanasess/setup-php@master
with:
-php-version: 8.4
-extensions: apcu
+php-version: '8.0'
- name: Run dependency update script
run: |
set -x
+curl -sS https://getcomposer.org/installer | php
+mv composer.phar /usr/local/bin/composer
+chmod +x /usr/local/bin/composer
cd ./php
-composer update --with-all-dependencies
-# Disable dependency updates for now
-# set +e
-# ALL_LINES="$(composer outdated | grep -v "^$\|Direct dependencies\|Everything up to date\|Transitive dependencies")"
-# set -e
-# while [ -n "$ALL_LINES" ]; do
-# CURRENT_LINE="$(echo "$ALL_LINES" | head -1)"
-# composer require "$(echo "$CURRENT_LINE" | awk '{print $1}')" "^$(echo "$CURRENT_LINE" | awk '{print $4}')" --with-all-dependencies
-# ALL_LINES="$(echo "$ALL_LINES" | sed '1d')"
-# done
-# echo "outdated dependencies:
-# $(composer outdated)"
+composer update
+set +e
+ALL_LINES="$(composer outdated | grep -v "psr/container\|^$\|Direct dependencies\|Everything up to date\|Transitive dependencies")"
+set -e
+while [ -n "$ALL_LINES" ]; do
+CURRENT_LINE="$(echo "$ALL_LINES" | head -1)"
+composer require "$(echo "$CURRENT_LINE" | awk '{print $1}')" "^$(echo "$CURRENT_LINE" | awk '{print $4}')"
+ALL_LINES="$(echo "$ALL_LINES" | sed '1d')"
+done
+echo "outdated dependencies:
+$(composer outdated)"
- name: Update apcu
run: |
# APCU
@@ -44,12 +44,12 @@ jobs:
)"
sed -i "s|pecl install APCu.*\;|pecl install APCu-$apcu_version\;|" ./Containers/mastercontainer/Dockerfile
- name: Create Pull Request
-uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
+uses: peter-evans/create-pull-request@v4
with:
-commit-message: php dependency updates
+commit-message: dependency updates
signoff: true
-title: PHP dependency updates
-body: Automated php dependency updates since dependabot does not support grouped updates
-labels: dependencies, 3. to review
+title: Dependency updates
+body: Automated dependency updates since dependabot does not support grouped updates
+labels: dependencies, enhancement
milestone: next
branch: aio-dependency-update


@@ -1,46 +0,0 @@
name: Docker Lint
on:
pull_request:
paths:
- 'Containers/**'
push:
branches:
- main
paths:
- 'Containers/**'
permissions:
contents: read
concurrency:
group: docker-lint-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
docker-lint:
runs-on: ubuntu-latest
name: docker-lint
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Install hadolint
run: |
sudo wget https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -O /usr/bin/hadolint
sudo chmod +x /usr/bin/hadolint
- name: run lint
run: |
DOCKERFILES="$(find ./Containers -name Dockerfile)"
mapfile -t DOCKERFILES <<< "$DOCKERFILES"
for file in "${DOCKERFILES[@]}"; do
# DL3018 warning: Pin versions in apk add. Instead of `apk add <package>` use `apk add <package>=<version>`
# DL4006 warning: Set the SHELL option -o pipefail before RUN with a pipe in it. If you are using /bin/sh in an alpine image or if your shell is symlinked to busybox then consider explicitly setting your SHELL to /bin/ash, or disable this check
hadolint "$file" --ignore DL3018 --ignore DL4006 | tee -a ./hadolint.log
done
if grep -q "DL[0-9]\+\|SC[0-9]\+" ./hadolint.log; then
exit 1
fi


@@ -1,50 +0,0 @@
name: Block if prerelease is present
on:
pull_request:
permissions:
contents: read
jobs:
check-latest-release:
runs-on: ubuntu-latest
steps:
- name: "Check latest published release isn't a prerelease"
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v6
with:
script: |
const tags = await github.rest.repos.listTags({
owner: context.repo.owner,
repo: context.repo.repo,
per_page: 1
});
if (!tags.data || tags.data.length === 0) {
core.info('No tags found for this repository; skipping prerelease check.');
return;
}
const latestTag = tags.data[0].name;
core.info(`Latest tag found: ${latestTag}`);
try {
const { data } = await github.rest.repos.getReleaseByTag({
owner: context.repo.owner,
repo: context.repo.repo,
tag: latestTag
});
if (data.prerelease) {
core.setFailed(`Release for tag ${latestTag} (${data.tag_name}) is a prerelease. Blocking merges to main as we need to wait for the prerelease to become stable.`);
} else {
core.info(`Release for tag ${latestTag} (${data.tag_name}) is not a prerelease.`);
}
} catch (err) {
if (err.status === 404) {
core.info(`No release found for tag ${latestTag}; skipping prerelease check.`);
} else {
throw err;
}
}


@@ -1,51 +0,0 @@
name: Helm Chart Releaser
on:
push:
branches:
- main
paths:
- 'nextcloud-aio-helm-chart/**'
jobs:
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Turnstyle
uses: softprops/turnstyle@e565d2d86403c5d23533937e95980570545e5586 # v2
with:
continue-after-seconds: 180
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Fetch history
run: git fetch --prune --unshallow
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
# See https://github.com/helm/chart-releaser-action/issues/6
- name: Set up Helm
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4
with:
version: v3.6.3
- name: Run Helm Lint
run: |
helm lint ./nextcloud-aio-helm-chart
- name: Run chart-releaser
uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f # v1.7.0
with:
mark_as_latest: false
charts_dir: .
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
CR_RELEASE_NAME_TEMPLATE: "helm-chart-{{ .Version }}"
CR_SKIP_EXISTING: true


@@ -1,33 +0,0 @@
name: imaginary-update
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
jobs:
run_update:
name: update to latest imaginary commit on master branch
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Run imaginary-update
run: |
# Imaginary
imaginary_version="$(
git ls-remote https://github.com/h2non/imaginary master \
| cut -f1 \
| tail -1
)"
sed -i "s|^ENV IMAGINARY_HASH.*$|ENV IMAGINARY_HASH=$imaginary_version|" ./Containers/imaginary/Dockerfile
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
with:
commit-message: imaginary-update automated change
signoff: true
title: Imaginary update
body: Automated Imaginary container update
labels: dependencies, 3. to review
milestone: next
branch: imaginary-container-update


@@ -1,37 +1,20 @@
name: Json Validator
on:
pull_request:
-paths:
-- '**.json'
-push:
-branches:
-- main
-paths:
-- '**.json'
-jobs:
-json-validator:
-name: Json Validator
-runs-on: ubuntu-latest
-steps:
-- name: Checkout code
-uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
-- name: Validate Json
-run: |
-sudo apt-get update
-sudo apt-get install python3-venv -y --no-install-recommends
-python3 -m venv venv
-. venv/bin/activate
-pip3 install json-spec
-if ! json validate --schema-file=php/containers-schema.json --document-file=php/containers.json; then
-exit 1
-fi
-JSON_FILES="$(find ./community-containers -name '*.json')"
-mapfile -t JSON_FILES <<< "$JSON_FILES"
-for file in "${JSON_FILES[@]}"; do
-json validate --schema-file=php/containers-schema.json --document-file="$file" 2>&1 | tee -a ./json-validator.log
-done
-if grep -q "document does not validate with schema.\|invalid JSONFile" ./json-validator.log; then
-exit 1
-fi
+push:
+branches:
+- main
+jobs:
+psalm:
+name: Json Validator
+runs-on: ubuntu-latest
+steps:
+- name: Checkout code
+uses: actions/checkout@v3
+- name: Validate Json
+run: |
+sudo apt install python3-pip --no-install-recommends
+sudo pip3 install json-spec
+json validate --schema-file=php/containers-schema.json --document-file=php/containers.json


@@ -1,24 +0,0 @@
name: Lint Helm Charts
on:
workflow_dispatch:
pull_request:
paths:
- 'nextcloud-aio-helm-chart/**'
jobs:
lint-helm:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- name: Install Helm
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4
with:
version: v3.11.1
- name: Lint charts
run: helm lint nextcloud-aio-helm-chart


@@ -1,67 +1,48 @@
# This workflow is provided via the organization template repository # This workflow is provided via the organization template repository
# #
# https://github.com/nextcloud/.github # https://github.com/nextcloud/.github
# https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization # https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization
#
# SPDX-FileCopyrightText: 2021-2024 Nextcloud GmbH and Nextcloud contributors name: Lint
# SPDX-License-Identifier: MIT
on:
name: Lint php pull_request:
push:
on: branches:
pull_request: - main
paths: - master
- 'php/**' - stable*
push:
branches: jobs:
- main php-lint:
paths: runs-on: ubuntu-latest
- 'php/**' strategy:
matrix:
permissions: php-versions: ["8.0"]
contents: read
name: php-lint
concurrency:
group: lint-php-${{ github.head_ref || github.run_id }} steps:
cancel-in-progress: true - name: Checkout
uses: actions/checkout@v3
jobs:
php-lint: - name: Set up php ${{ matrix.php-versions }}
runs-on: ubuntu-latest uses: shivammathur/setup-php@v2
strategy: with:
matrix: php-version: ${{ matrix.php-versions }}
php-versions: [ "8.4" ] coverage: none
name: php-lint - name: Lint
run: cd php && composer run lint
steps:
- name: Checkout summary:
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 runs-on: ubuntu-latest
with: needs: php-lint
persist-credentials: false
if: always()
- name: Set up php ${{ matrix.php-versions }}
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2.36.0 name: php-lint-summary
with:
php-version: ${{ matrix.php-versions }} steps:
coverage: none - name: Summary status
ini-file: development run: if ${{ needs.php-lint.result != 'success' && needs.php-lint.result != 'skipped' }}; then exit 1; fi
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Lint
run: cd php && composer run lint
summary:
permissions:
contents: none
runs-on: ubuntu-latest-low
needs: php-lint
if: always()
name: php-lint-summary
steps:
- name: Summary status
run: if ${{ needs.php-lint.result != 'success' && needs.php-lint.result != 'skipped' }}; then exit 1; fi


@@ -1,42 +0,0 @@
# This workflow is provided via the organization template repository
#
# https://github.com/nextcloud/.github
# https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization
#
# SPDX-FileCopyrightText: 2021-2024 Nextcloud GmbH and Nextcloud contributors
# SPDX-License-Identifier: MIT
name: Lint YAML
on:
pull_request:
paths:
- '**.yml'
permissions:
contents: read
jobs:
yaml-lint:
runs-on: ubuntu-latest
name: yaml
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.1
with:
persist-credentials: false
- name: GitHub action templates lint
uses: ibiqlik/action-yamllint@2576378a8e339169678f9939646ee3ee325e845c # v3.1.1
with:
file_or_dir: .github/workflows
config_data: |
line-length: warning
- name: Install the latest version of uv
uses: astral-sh/setup-uv@803947b9bd8e9f986429fa0c5a41c367cd732b41 # v7.2.1
- name: Check GitHub actions
run: uvx zizmor --min-severity medium .github/workflows/*.yml


@@ -14,7 +14,7 @@ jobs:
action:
runs-on: ubuntu-latest
steps:
-- uses: dessant/lock-threads@7266a7ce5c1df01b1c6db85bf8cd86c737dadbe7 # v5
+- uses: dessant/lock-threads@v3
with:
issue-inactive-days: '14'
process-only: 'issues'


@@ -2,7 +2,6 @@
name: nextcloud-update
on:
-workflow_dispatch:
schedule:
- cron: '00 12 * * *'
@@ -11,7 +10,7 @@ jobs:
name: Run nextcloud-update script
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+- uses: actions/checkout@v3
- name: Run nextcloud-update script
run: |
# Inspired by https://github.com/nextcloud/docker/blob/master/update.sh
@@ -25,7 +24,7 @@ jobs:
| sort -V \
| tail -1
)"
-sed -i "s|\(pecl install[^;]*APCu-\)[0-9.]*|\1$apcu_version|" ./Containers/nextcloud/Dockerfile
+sed -i "s|pecl install APCu.*\;|pecl install APCu-$apcu_version\;|" ./Containers/nextcloud/Dockerfile
# Memcached
memcached_version="$(
@@ -36,7 +35,7 @@ jobs:
| sort -V \
| tail -1
)"
-sed -i "s|\(pecl install[^;]*memcached-\)[0-9.]*|\1$memcached_version|" ./Containers/nextcloud/Dockerfile
+sed -i "s|pecl install memcached.*\;|pecl install memcached-$memcached_version\;|" ./Containers/nextcloud/Dockerfile
# Redis
redis_version="$(
@@ -47,44 +46,31 @@ jobs:
| sort -V \
| tail -1
)"
-sed -i "s|\(pecl install[^;]*redis-\)[0-9.]*|\1$redis_version|" ./Containers/nextcloud/Dockerfile
+sed -i "s|pecl install redis.*\;|pecl install redis-$redis_version\;|" ./Containers/nextcloud/Dockerfile
# Imagick
imagick_version="$(
-git ls-remote --tags https://github.com/imagick/imagick.git \
+git ls-remote --tags https://github.com/mkoppanen/imagick.git \
| cut -d/ -f3 \
| grep -viE '[a-z]' \
| tr -d '^{}' \
| sort -V \
| tail -1
)"
-sed -i "s|\(pecl install[^;]*imagick-\)[0-9.]*|\1$imagick_version|" ./Containers/nextcloud/Dockerfile
+sed -i "s|pecl install imagick.*\;|pecl install imagick-$imagick_version\;|" ./Containers/nextcloud/Dockerfile
-# Igbinary
-igbinary_version="$(
-git ls-remote --tags https://github.com/igbinary/igbinary.git \
-| cut -d/ -f3 \
-| grep -viE '[a-z]' \
-| tr -d '^{}' \
-| sort -V \
-| tail -1
-)"
-sed -i "s|\(pecl install[^;]*igbinary-\)[0-9.]*|\1$igbinary_version|" ./Containers/nextcloud/Dockerfile
# Nextcloud
NC_MAJOR="$(grep "ENV NEXTCLOUD_VERSION" ./Containers/nextcloud/Dockerfile | grep -oP '[23][0-9]')"
NCVERSION=$(curl -s -m 900 https://download.nextcloud.com/server/releases/ | sed --silent 's/.*href="nextcloud-\([^"]\+\).zip.asc".*/\1/p' | grep "$NC_MAJOR" | sort --version-sort | tail -1)
-if [ -n "$NCVERSION" ]; then
-sed -i "s|^ENV NEXTCLOUD_VERSION.*|ENV NEXTCLOUD_VERSION=$NCVERSION|" ./Containers/nextcloud/Dockerfile
-fi
+sed -i "s|^ENV NEXTCLOUD_VERSION.*|ENV NEXTCLOUD_VERSION $NCVERSION|" ./Containers/nextcloud/Dockerfile
- name: Create Pull Request
-uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
+uses: peter-evans/create-pull-request@v4
with:
commit-message: nextcloud-update automated change
signoff: true
-title: Nextcloud dependency update
+title: Nextcloud update
body: Automated Nextcloud container update
-labels: dependencies, 3. to review
+labels: dependencies, enhancement
milestone: next
branch: nextcloud-container-update


@@ -1,35 +0,0 @@
name: PHP Deprecation Detector
# See https://github.com/wapmorgan/PhpDeprecationDetector
on:
pull_request:
paths:
- 'php/**'
push:
branches:
- main
paths:
- 'php/**'
jobs:
phpdd:
name: PHP Deprecation Detector
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Set up php
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2
with:
php-version: 8.4
extensions: apcu
coverage: none
- name: Run script
run: |
set -x
cd php
composer install
composer run php-deprecation-detector | tee -i ./phpdd.log
if grep "Total issues:" ./phpdd.log; then
exit 1
fi


@@ -1,123 +0,0 @@
name: Playwright Tests on push
on:
pull_request:
paths:
- 'php/**'
push:
branches:
- main
paths:
- 'php/**'
concurrency:
group: playwright-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
env:
BASE_URL: https://localhost:8080
jobs:
test:
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6
with:
node-version: lts/*
- name: Install dependencies
run: cd php/tests && npm ci
- name: Install Playwright Browsers
run: cd php/tests && npx playwright install --with-deps chromium
- name: Set up php 8.4
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2.36.0
with:
extensions: apcu
php-version: 8.4
coverage: none
ini-file: development
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Adjust some things and fix permissions
run: |
cd php
rm -r ./data
rm -r ./session
composer install --no-dev
composer clear-cache
sudo chmod 777 -R ./
- name: Start fresh development server
run: |
docker rm --force nextcloud-aio-{mastercontainer,apache,notify-push,nextcloud,redis,database,domaincheck,whiteboard,imaginary,talk,collabora,borgbackup} || true
docker volume rm nextcloud_aio_{mastercontainer,apache,database,database_dump,nextcloud,nextcloud_data,redis,backup_cache,elasticsearch} || true
docker pull ghcr.io/nextcloud-releases/all-in-one:develop
docker run \
-d \
--init \
--name nextcloud-aio-mastercontainer \
--restart always \
--publish 8080:8080 \
--volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
--volume ./php:/var/www/docker-aio/php \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
--env SKIP_DOMAIN_VALIDATION=true \
--env APACHE_PORT=11000 \
ghcr.io/nextcloud-releases/all-in-one:develop
echo Waiting for 10 seconds for the development container to start ...
sleep 10
- name: Run Playwright tests for initial setup
run: |
cd php/tests
export DEBUG=pw:api
if ! npx playwright test tests/initial-setup.spec.js; then
docker logs nextcloud-aio-mastercontainer
docker logs nextcloud-aio-borgbackup
exit 1
fi
- name: Start fresh development server
run: |
docker rm --force nextcloud-aio-{mastercontainer,apache,notify-push,nextcloud,redis,database,domaincheck,whiteboard,imaginary,talk,collabora,borgbackup} || true
docker volume rm nextcloud_aio_{mastercontainer,apache,database,database_dump,nextcloud,nextcloud_data,redis,backup_cache,elasticsearch} || true
docker run \
-d \
--init \
--name nextcloud-aio-mastercontainer \
--restart always \
--publish 8080:8080 \
--volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
--volume ./php:/var/www/docker-aio/php \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
--env SKIP_DOMAIN_VALIDATION=false \
--env APACHE_PORT=11000 \
ghcr.io/nextcloud-releases/all-in-one:develop
echo Waiting for 10 seconds for the development container to start ...
sleep 10
- name: Run Playwright tests for backup restore
run: |
cd php/tests
export DEBUG=pw:api
if ! npx playwright test tests/restore-instance.spec.js; then
docker logs nextcloud-aio-mastercontainer
docker logs nextcloud-aio-borgbackup
exit 1
fi
- uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
if: ${{ !cancelled() }}
with:
name: playwright-report
path: php/tests/playwright-report/
retention-days: 14
overwrite: true


@@ -1,91 +0,0 @@
name: Playwright Tests
on:
workflow_dispatch:
env:
BASE_URL: https://localhost:8080
jobs:
test:
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6
with:
node-version: lts/*
- name: Install dependencies
run: cd php/tests && npm ci
- name: Install Playwright Browsers
run: cd php/tests && npx playwright install --with-deps chromium
- name: Start fresh development server
run: |
docker rm --force nextcloud-aio-{mastercontainer,apache,notify-push,nextcloud,redis,database,domaincheck,whiteboard,imaginary,talk,collabora,borgbackup} || true
docker volume rm nextcloud_aio_{mastercontainer,apache,database,database_dump,nextcloud,nextcloud_data,redis,backup_cache,elasticsearch} || true
docker pull ghcr.io/nextcloud-releases/all-in-one:develop
docker run \
-d \
--init \
--name nextcloud-aio-mastercontainer \
--restart always \
--publish 8080:8080 \
--volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
--env SKIP_DOMAIN_VALIDATION=true \
--env APACHE_PORT=11000 \
ghcr.io/nextcloud-releases/all-in-one:develop
echo Waiting for 10 seconds for the development container to start ...
sleep 10
- name: Run Playwright tests for initial setup
run: |
cd php/tests
export DEBUG=pw:api
if ! npx playwright test tests/initial-setup.spec.js; then
docker logs nextcloud-aio-mastercontainer
docker logs nextcloud-aio-borgbackup
exit 1
fi
- name: Start fresh development server
run: |
docker rm --force nextcloud-aio-{mastercontainer,apache,notify-push,nextcloud,redis,database,domaincheck,whiteboard,imaginary,talk,collabora,borgbackup} || true
docker volume rm nextcloud_aio_{mastercontainer,apache,database,database_dump,nextcloud,nextcloud_data,redis,backup_cache,elasticsearch} || true
docker run \
-d \
--init \
--name nextcloud-aio-mastercontainer \
--restart always \
--publish 8080:8080 \
--volume nextcloud_aio_mastercontainer:/mnt/docker-aio-config \
--volume /var/run/docker.sock:/var/run/docker.sock:ro \
--env SKIP_DOMAIN_VALIDATION=false \
--env APACHE_PORT=11000 \
ghcr.io/nextcloud-releases/all-in-one:develop
echo Waiting for 10 seconds for the development container to start ...
sleep 10
- name: Run Playwright tests for backup restore
run: |
cd php/tests
export DEBUG=pw:api
if ! npx playwright test tests/restore-instance.spec.js; then
docker logs nextcloud-aio-mastercontainer
docker logs nextcloud-aio-borgbackup
exit 1
fi
- uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
if: ${{ !cancelled() }}
with:
name: playwright-report
path: php/tests/playwright-report/
retention-days: 14
overwrite: true

.github/workflows/psalm-analysis.yml

@@ -0,0 +1,28 @@
name: Psalm Analysis
on:
pull_request:
push:
branches:
- main
jobs:
psalm:
name: Psalm
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up php8.0
uses: shivammathur/setup-php@v2
with:
php-version: 8.0
extensions: apcu
coverage: none
- name: Run script
run: |
set -x
cd php
composer global require vimeo/psalm --prefer-dist --no-progress --dev
composer install
composer run psalm

.github/workflows/psalm-security.yml

@@ -0,0 +1,25 @@
name: Psalm Security Analysis
on:
push:
branches:
- main
jobs:
psalm:
name: Psalm
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Psalm
uses: docker://ghcr.io/nextcloud/all-in-one-psalm
with:
relative_dir: php
security_analysis: true
composer_ignore_platform_reqs: false
report_file: results.sarif
- name: Upload Security Analysis results to GitHub
uses: github/codeql-action/upload-sarif@v2
with:
sarif_file: php/results.sarif


@@ -10,12 +10,12 @@ jobs:
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
-- name: Set up php
-uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2
+- uses: actions/checkout@v3
+- name: Set up php8.0
+uses: shivammathur/setup-php@v2
with:
-php-version: 8.4
+php-version: 8.0
extensions: apcu
coverage: none
@@ -23,14 +23,15 @@ jobs:
run: |
set -x
cd php
+composer global require vimeo/psalm --prefer-dist --no-progress --dev
composer install
-composer run psalm:update-baseline
+composer run psalm -- --monochrome --no-progress --output-format=text --update-baseline
git clean -f lib/composer
git checkout composer.json composer.lock lib/composer
continue-on-error: true
- name: Create Pull Request
-uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
+uses: peter-evans/create-pull-request@v4
with:
token: ${{ secrets.COMMAND_BOT_PAT }}
commit-message: Update psalm baseline
@@ -38,9 +39,10 @@ jobs:
author: nextcloud-command <nextcloud-command@users.noreply.github.com>
signoff: true
branch: automated/noid/psalm-baseline-update
+# Make sure we can open multiple PRs
+branch-suffix: timestamp
title: '[Automated] Update psalm-baseline.xml'
-milestone: next
body: |
Auto-generated update psalm-baseline.xml with fixed psalm warnings
labels: |
-3. to review, dependencies
+3. to review


@@ -1,56 +0,0 @@
# This workflow is provided via the organization template repository
#
# https://github.com/nextcloud/.github
# https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization
#
# SPDX-FileCopyrightText: 2022-2024 Nextcloud GmbH and Nextcloud contributors
# SPDX-License-Identifier: MIT
name: Static analysis
on:
pull_request:
paths:
- 'php/**'
push:
branches:
- main
paths:
- 'php/**'
concurrency:
group: psalm-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
permissions:
contents: read
jobs:
static-analysis:
runs-on: ubuntu-latest
name: static-psalm-analysis
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Set up php
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2.36.0
with:
php-version: 8.4
extensions: apcu
coverage: none
ini-file: development
# Temporary workaround for missing pcntl_* in PHP 8.3
ini-values: disable_functions=
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Install dependencies and run psalm
run: |
set -x
cd php
composer install
composer run psalm


@@ -2,22 +2,18 @@ name: Shellcheck
on:
pull_request:
-paths:
-- '**.sh'
push:
branches:
- main
-paths:
-- '**.sh'
jobs:
shellcheck:
-name: Check Shell
+name: Github Actions
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+- uses: actions/checkout@v3
- name: Run Shellcheck
-uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # v2.0.0
+uses: ludeeus/action-shellcheck@master
with:
check_together: 'yes'
env:

.github/workflows/spellcheck.yml

@@ -0,0 +1,23 @@
name: 'Spellcheck'
on:
pull_request:
push:
branches:
- main
jobs:
spellcheck:
name: Check spelling
runs-on: ubuntu-latest
steps:
- name: spelling or typos
uses: actions/checkout@v3
- name: fix permission for reviewdog
run: sudo chown -R root:root $GITHUB_WORKSPACE
- name: misspell
uses: reviewdog/action-misspell@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
locale: "US"
fail_on_error: true


@@ -1,56 +0,0 @@
name: talk-update
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
jobs:
talk-update:
name: update talk
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Run talk-container-update
run: |
# Recording
recording_version="$(
git ls-remote https://github.com/nextcloud/nextcloud-talk-recording v* \
| cut -d/ -f3 \
| sort -V \
| grep -E "^v[0-9\.]+$" \
| tail -1
)"
sed -i "s|^ENV RECORDING_VERSION.*$|ENV RECORDING_VERSION=$recording_version|" ./Containers/talk-recording/Dockerfile
curl -L "https://raw.githubusercontent.com/nextcloud/nextcloud-talk-recording/$recording_version/server.conf.in" -o Containers/talk-recording/recording.conf
# Signaling
signaling_version="$(
git ls-remote https://github.com/strukturag/nextcloud-spreed-signaling v*.*.* \
| cut -d/ -f3 \
| sort -V \
| grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$" \
| tail -1
)"
curl -L "https://raw.githubusercontent.com/strukturag/nextcloud-spreed-signaling/$signaling_version/server.conf.in" -o Containers/talk/server.conf.in
# Janus
janus_version="$(
git ls-remote https://github.com/meetecho/janus-gateway v1.*.* \
| cut -d/ -f3 \
| sort -V \
| grep -E "^v[0-9]+\.[0-9]+\.[0-9]+$" \
| tail -1
)"
sed -i "s|^ARG JANUS_VERSION=.*$|ARG JANUS_VERSION=$janus_version|" ./Containers/talk/Dockerfile
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
with:
commit-message: talk-update automated change
signoff: true
title: talk container update
body: Automated talk container update
labels: dependencies, 3. to review
milestone: next
branch: talk-container-update


@@ -1,40 +0,0 @@
name: Twig Lint
on:
pull_request:
paths:
- '**.twig'
push:
branches:
- main
paths:
- '**.twig'
permissions:
contents: read
concurrency:
group: lint-twig-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
twig-lint:
runs-on: ubuntu-latest
name: twig-lint
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Set up php ${{ matrix.php-versions }}
uses: shivammathur/setup-php@7bf05c6b704e0b9bfee22300130a31b5ea68d593 # v2
with:
php-version: 8.4
extensions: apcu
coverage: none
- name: twig lint
run: |
cd php
composer install
composer run lint:twig


@@ -1,11 +0,0 @@
name: Update Copyright
on:
workflow_dispatch:
jobs:
update-copyright:
name: update copyright
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2


@@ -1,35 +0,0 @@
name: Update Helm Chart
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
jobs:
update-helm:
name: update helm chart
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: update helm chart
run: |
set -x
GHCR_TOKEN="$(curl https://ghcr.io/token?scope=repository:nextcloud-releases/nce-php-fpm-mgmt:pull | jq '.token' | sed 's|"||g')"
DOCKER_TAG="$(curl -H "Authorization: Bearer ${GHCR_TOKEN}" -L -s 'https://ghcr.io/v2/nextcloud-releases/all-in-one/tags/list?page_size=1024' | jq '.tags' | sed 's|"||g;s|[[:space:]]||g;s|,||g' | grep '^20[0-9_]\+' | grep -v latest | sort -r | head -1)"
export DOCKER_TAG
set +x
if [ -n "$DOCKER_TAG" ] && ! grep -q "aio-nextcloud:$DOCKER_TAG" ./nextcloud-aio-helm-chart/templates/nextcloud-aio-nextcloud-deployment.yaml; then
sudo bash nextcloud-aio-helm-chart/update-helm.sh "$DOCKER_TAG"
fi
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
with:
commit-message: Helm Chart updates
signoff: true
title: Helm Chart updates
body: Automated Helm Chart updates for the yaml files. It can be merged if it looks good at any time which will automatically trigger a new release of the helm chart.
labels: dependencies, 3. to review
milestone: next
branch: aio-helm-update
token: ${{ secrets.GITHUB_TOKEN }}


@@ -6,23 +6,22 @@ on:
- cron: '00 12 * * *'
jobs:
-update-yaml:
+psalm:
name: update yaml files
runs-on: ubuntu-latest
steps:
- name: Checkout code
-uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+uses: actions/checkout@v3
- name: update yaml files
run: |
sudo bash manual-install/update-yaml.sh
- name: Create Pull Request
-uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
+uses: peter-evans/create-pull-request@v4
with:
commit-message: Yaml updates
signoff: true
title: Yaml updates
body: Automated yaml updates for the docker-compose files. Should only be merged shortly before the next latest release.
-labels: dependencies, 3. to review
+labels: dependencies
milestone: next
branch: aio-yaml-update
-token: ${{ secrets.GITHUB_TOKEN }}


@@ -1,37 +0,0 @@
name: watchtower-update
on:
workflow_dispatch:
schedule:
- cron: '00 12 * * *'
jobs:
watchtower-update:
name: update watchtower
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Run watchtower-container-update
run: |
# Watchtower
watchtower_version="$(
git ls-remote https://github.com/nicholas-fedor/watchtower v* \
| cut -d/ -f3 \
| sort -V \
| grep -E "^v[0-9\.]+$" \
| tail -1
)"
watchtower_commit_hash="$(git ls-remote https://github.com/nicholas-fedor/watchtower $watchtower_version | sed 's/refs.*//')"
sed -i "s|^ENV WATCHTOWER_COMMIT_HASH.*$|ENV WATCHTOWER_COMMIT_HASH=$watchtower_commit_hash|" ./Containers/watchtower/Dockerfile
sed -i "s|\$WATCHTOWER_COMMIT_HASH.*$|\$WATCHTOWER_COMMIT_HASH # $watchtower_version|" ./Containers/watchtower/Dockerfile
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v7
with:
commit-message: watchtower-update automated change
signoff: true
title: watchtower container update
body: Automated watchtower container update
labels: dependencies, 3. to review
milestone: next
branch: watchtower-container-update

.gitignore

@@ -1,15 +1,8 @@
.DS_Store
-.idea/
-*.iml
-/php/data/*
-/php/session/*
-!/php/data/.gitkeep
-!/php/session/.gitkeep
+/php/data/containers.json
+/php/data/configuration.json
+/php/data/backupsecret.json
/php/vendor
/manual-install/*.conf
!/manual-install/sample.conf
/manual-install/docker-compose.yml
-/manual-install/compose.yaml
-/manual-install/.env

CODEOWNERS

@@ -0,0 +1 @@
* @szaimen @juliushaertl


@@ -1,13 +0,0 @@
<!--
- SPDX-FileCopyrightText: 2018 Nextcloud GmbH and Nextcloud contributors
- SPDX-License-Identifier: AGPL-3.0-or-later
-->
In the Nextcloud community, participants from all over the world come together to create Free Software for a free internet. This is made possible by the support, hard work and enthusiasm of thousands of people, including those who create and use Nextcloud software.
Our code of conduct offers some guidance to ensure Nextcloud participants can cooperate effectively in a positive and inspiring atmosphere, and to explain how together we can strengthen and support each other.
The Code of Conduct is shared by all contributors and users who engage with the Nextcloud team and its community services. It presents a summary of the shared values and “common sense” thinking in our community.
You can find our full code of conduct on our website: https://nextcloud.com/code-of-conduct/
Please, keep our CoC in mind when you contribute! That way, everyone can be a part of our community in a productive, positive, creative and fun way.


@@ -1,7 +0,0 @@
# syntax=docker/dockerfile:latest
FROM alpine:3.23.3
RUN set -ex; \
apk upgrade --no-cache -a
LABEL org.label-schema.vendor="Nextcloud"


@@ -4,67 +4,63 @@
storage file_system { storage file_system {
root /mnt/data/caddy root /mnt/data/caddy
} }
servers {
# trusted_proxies placeholder
}
log {
level ERROR
}
} }
https://{$ADDITIONAL_TRUSTED_DOMAIN}:443,
http://{$APACHE_HOST}:23973, # For Collabora callback and WOPI requests, see containers.json
{$PROTOCOL}://{$NC_DOMAIN}:{$APACHE_PORT} { {$PROTOCOL}://{$NC_DOMAIN}:{$APACHE_PORT} {
header -Server
header -X-Powered-By
# Collabora
route /browser/* {
reverse_proxy {$COLLABORA_HOST}:9980
}
route /hosting/* {
reverse_proxy {$COLLABORA_HOST}:9980
}
route /cool/* {
reverse_proxy {$COLLABORA_HOST}:9980
}
# Notify Push # Notify Push
route /push/* { route /push/* {
uri strip_prefix /push uri strip_prefix /push
reverse_proxy {$NOTIFY_PUSH_HOST}:7867 reverse_proxy {$NEXTCLOUD_HOST}:7867 {
} # trusted_proxies placeholder
# Onlyoffice
route /onlyoffice/* {
uri strip_prefix /onlyoffice
reverse_proxy {$ONLYOFFICE_HOST}:80 {
header_up X-Forwarded-Host {http.request.hostport}/onlyoffice
header_up X-Forwarded-Proto https
} }
} }
# Talk # Talk
route /standalone-signaling/* { route /standalone-signaling/* {
uri strip_prefix /standalone-signaling uri strip_prefix /standalone-signaling
reverse_proxy {$TALK_HOST}:8081 reverse_proxy {$TALK_HOST}:8081 {
# trusted_proxies placeholder
}
} }
# Whiteboard # Collabora
route /whiteboard/* { route /browser/* {
uri strip_prefix /whiteboard reverse_proxy {$COLLABORA_HOST}:9980 {
reverse_proxy {$WHITEBOARD_HOST}:3002 # trusted_proxies placeholder
}
}
route /hosting/* {
reverse_proxy {$COLLABORA_HOST}:9980 {
# trusted_proxies placeholder
}
}
route /cool/* {
reverse_proxy {$COLLABORA_HOST}:9980 {
# trusted_proxies placeholder
}
}
# Onlyoffice
route /onlyoffice/* {
uri strip_prefix /onlyoffice
reverse_proxy {$ONLYOFFICE_HOST}:80 {
header_up X-Forwarded-Host {http.request.host}/onlyoffice
header_up X-Forwarded-Proto https
# trusted_proxies placeholder
}
} }
# Nextcloud # Nextcloud
route { route {
rewrite /.well-known/carddav /remote.php/dav
rewrite /.well-known/caldav /remote.php/dav
header Strict-Transport-Security max-age=31536000; header Strict-Transport-Security max-age=31536000;
reverse_proxy 127.0.0.1:8000 reverse_proxy localhost:8000 {
# See https://github.com/nextcloud/all-in-one/issues/828
# trusted_proxies placeholder
}
} }
redir /.well-known/carddav /remote.php/dav/ 301
redir /.well-known/caldav /remote.php/dav/ 301
# TLS options # TLS options
tls { tls {


@@ -1,92 +1,80 @@
# syntax=docker/dockerfile:latest # Caddy is a requirement
FROM caddy:2.10.2-alpine AS caddy FROM caddy:2.5.2-alpine as caddy
# From https://github.com/docker-library/httpd/blob/master/2.4/alpine/Dockerfile FROM debian:bullseye-20220912-slim
FROM httpd:2.4.66-alpine3.23
COPY --from=caddy /usr/bin/caddy /usr/bin/caddy RUN mkdir -p /mnt/data; \
chown www-data:www-data /mnt/data;
COPY --chown=33:33 Caddyfile /Caddyfile
COPY --chmod=664 nextcloud.conf /usr/local/apache2/conf/nextcloud.conf
COPY --chmod=664 supervisord.conf /supervisord.conf
COPY --chmod=775 start.sh /start.sh
COPY --chmod=775 healthcheck.sh /healthcheck.sh
VOLUME /mnt/data VOLUME /mnt/data
RUN set -ex; \ RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache shadow; \
groupmod -g 33 www-data; \
usermod -u 33 -g 33 www-data; \
apk del --no-cache shadow; \
\ \
mkdir -p /mnt/data; \ apt-get update; \
chown -R www-data:www-data /mnt/data; \ apt-get install -y --no-install-recommends \
chown -R 777 /tmp; \ apache2 \
\
apk add --no-cache \
bash \
supervisor \ supervisor \
tzdata \ wget \
ca-certificates \ ca-certificates \
openssl \ openssl \
bind-tools \ netcat \
netcat-openbsd; \ dpkg-dev \
\ curl \
sed -i \ ; \
-e '/^Listen /d' \ rm -rf /var/lib/apt/lists/*
-e 's/^#\(LoadModule .*mod_rewrite.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_headers.so\)/\1/' \ COPY --from=caddy /usr/bin/caddy /usr/bin/
-e 's/^#\(LoadModule .*mod_proxy.so\)/\1/' \ RUN chmod +x /usr/bin/caddy
-e 's/^#\(LoadModule .*mod_proxy_fcgi.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_setenvif.so\)/\1/' \ RUN a2enmod rewrite \
-e 's/^#\(LoadModule .*mod_env.so\)/\1/' \ headers \
-e 's/^#\(LoadModule .*mod_mime.so\)/\1/' \ proxy \
-e 's/^#\(LoadModule .*mod_dir.so\)/\1/' \ proxy_fcgi \
-e 's/^#\(LoadModule .*mod_authz_core.so\)/\1/' \ setenvif \
-e 's/^#\(LoadModule .*mod_alias.so\)/\1/' \ env \
-e 's/^#\(LoadModule .*mod_mpm_event.so\)/\1/' \ mime \
-e 's/^#\(LoadModule .*mod_brotli.so\)/\1/' \ dir \
-e 's/\(LoadModule .*mod_mpm_worker.so\)/#\1/' \ authz_core \
-e 's/\(LoadModule .*mod_mpm_prefork.so\)/#\1/' \ alias
-e 's/\(ScriptAlias \)/#\1/' \
/usr/local/apache2/conf/httpd.conf; \ COPY nextcloud.conf /etc/apache2/sites-available/
echo "Include conf/nextcloud.conf" | tee -a /usr/local/apache2/conf/httpd.conf; \
echo "ServerName localhost" | tee -a /usr/local/apache2/conf/httpd.conf; \ RUN rm /etc/apache2/ports.conf; \
# Sync this with max db connections and pm.max_children sed -s -i -e "s/Include ports.conf//" /etc/apache2/apache2.conf; \
# We don't actually expect so many workers but don't want to limit it artificially because people will report issues otherwise. sed -i "/^Listen /d" /etc/apache2/apache2.conf
sed -i 's|MaxRequestWorkers.*|MaxRequestWorkers 5000|' /usr/local/apache2/conf/extra/httpd-mpm.conf; \
grep -q '<IfModule mpm_event_module>' /usr/local/apache2/conf/extra/httpd-mpm.conf; \ RUN set -ex; \
# ServerLimit needs to be set to MaxRequestWorkers divided by ThreadsPerChild which is set to 25 by default a2dissite 000-default && \
sed -i '/<IfModule mpm_event_module>/a\ \ \ \ ServerLimit 200' /usr/local/apache2/conf/extra/httpd-mpm.conf; \ a2dissite default-ssl && \
\ a2ensite nextcloud.conf && \
rm -rf /usr/local/apache2/conf/original /var/www; \ rm -rf /var/www/html/* && \
mkdir -p /var/www; \ chown www-data:www-data -R /var/log/apache2; \
chown -R www-data:www-data /var/www; \ mkdir -p /var/run/apache2; \
\ chown -R www-data:www-data /var/run/apache2; \
mkdir /var/log/supervisord; \ chown -R www-data:www-data /var/www;
RUN mkdir /var/log/supervisord; \
mkdir /var/run/supervisord; \ mkdir /var/run/supervisord; \
chown www-data:www-data /var/run/supervisord; \ chown www-data:www-data /var/run/supervisord; \
chown www-data:www-data /var/log/supervisord; \ chown www-data:www-data /var/log/supervisord;
chmod 777 /var/run/supervisord; \
chmod 777 /var/log/supervisord; \
\
chown -R www-data:www-data /usr/local/apache2; \
chmod +r -R /usr/local/apache2; \
mkdir -p /usr/local/apache2/logs; \
chmod 777 -R /home/www-data; \
chmod 777 -R /usr/local/apache2/logs; \
rm -rf /usr/local/apache2/cgi-bin/; \
\
echo "root:$(openssl rand -base64 12)" | chpasswd
USER 33 COPY Caddyfile /
ENTRYPOINT ["/start.sh"] COPY start.sh /usr/bin/
COPY healthcheck.sh /usr/bin/
COPY supervisord.conf /
RUN chmod +x /usr/bin/start.sh; \
chmod +x /usr/bin/healthcheck.sh; \
chmod +r /supervisord.conf; \
chown www-data:www-data /Caddyfile; \
chmod +r -R /etc/apache2
# Give root a random password
RUN echo "root:$(openssl rand -base64 12)" | chpasswd
USER www-data
ENTRYPOINT ["start.sh"]
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"] CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]
HEALTHCHECK CMD /healthcheck.sh HEALTHCHECK CMD healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
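The two MaxRequestWorkers/ServerLimit tweaks above follow the usual mpm_event relation ServerLimit = MaxRequestWorkers / ThreadsPerChild; a minimal sketch of the arithmetic, assuming Apache's default ThreadsPerChild of 25:
MAX_REQUEST_WORKERS=5000
THREADS_PER_CHILD=25                                                 # mpm_event default
echo "ServerLimit $(( MAX_REQUEST_WORKERS / THREADS_PER_CHILD ))"    # prints: ServerLimit 200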

View file

@ -1,5 +1,8 @@
#!/bin/bash #!/bin/bash
nc -z "$NEXTCLOUD_HOST" 9000 || exit 0 curl -skfI localhost:8000 || exit 1
nc -z 127.0.0.1 8000 || exit 1 if [ "$APACHE_PORT" != '443' ]; then
nc -z 127.0.0.1 "$APACHE_PORT" || exit 1 curl -skfI localhost:"$APACHE_PORT" || exit 1
else
curl -skfI https://"$NC_DOMAIN":"$APACHE_PORT" || exit 1
fi

View file

@ -1,28 +1,13 @@
Listen 8000 Listen 8000
<VirtualHost *:8000> <VirtualHost *:8000>
ServerName localhost
# Add error log # Add error log
CustomLog /proc/self/fd/1 proxy CustomLog ${APACHE_LOG_DIR}/access.log combined
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy ErrorLog ${APACHE_LOG_DIR}/error.log
ErrorLog /proc/self/fd/2
ErrorLogFormat "[%t] [%l] [%E] [client: %{X-Forwarded-For}i] [%M] [%{User-Agent}i]"
LogLevel warn
# PHP match # PHP match
<FilesMatch "\.php$"> <FilesMatch "\.php$">
SetHandler "proxy:fcgi://${NEXTCLOUD_HOST}:9000" SetHandler "proxy:fcgi://${NEXTCLOUD_HOST}:9000"
</FilesMatch> </FilesMatch>
<Proxy "fcgi://${NEXTCLOUD_HOST}:9000" flushpackets=on>
</Proxy>
# Enable Brotli compression for js, css and svg files - other plain files are compressed by Nextcloud by default
<IfModule mod_brotli.c>
AddOutputFilterByType BROTLI_COMPRESS text/javascript application/javascript application/x-javascript text/css image/svg+xml
BrotliCompressionQuality 0
</IfModule>
# Nextcloud dir # Nextcloud dir
DocumentRoot /var/www/html/ DocumentRoot /var/www/html/
<Directory /var/www/html/> <Directory /var/www/html/>
@ -40,6 +25,10 @@ Listen 8000
Require all denied Require all denied
</Files> </Files>
# Fix zero file sizes
# See https://github.com/nextcloud/server/issues/3056#issuecomment-954209565
SetEnv proxy-sendcl 1
# See https://httpd.apache.org/docs/current/en/mod/core.html#limitrequestbody # See https://httpd.apache.org/docs/current/en/mod/core.html#limitrequestbody
LimitRequestBody ${APACHE_MAX_SIZE} LimitRequestBody ${APACHE_MAX_SIZE}
@ -48,7 +37,4 @@ Listen 8000
# See https://httpd.apache.org/docs/current/mod/mod_proxy.html#proxytimeout # See https://httpd.apache.org/docs/current/mod/mod_proxy.html#proxytimeout
ProxyTimeout ${APACHE_MAX_TIME} ProxyTimeout ${APACHE_MAX_TIME}
# See https://httpd.apache.org/docs/trunk/mod/core.html#traceenable
TraceEnable Off
</VirtualHost> </VirtualHost>
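A rough way to confirm that the mod_brotli filter above is active, assuming the instance is reachable; the domain and the .js asset path are placeholders:
curl -sI -H 'Accept-Encoding: br' https://cloud.example.com/core/js/dist/main.js | grep -i '^content-encoding'
# expected: content-encoding: br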

View file

@ -17,13 +17,6 @@ while ! nc -z "$NEXTCLOUD_HOST" 9000; do
sleep 5 sleep 5
done done
# Get ipv4-address of Apache
# shellcheck disable=SC2153
IPv4_ADDRESS="$(dig "$APACHE_HOST" A +short +search | head -1)"
# Bring it in CIDR notation
# shellcheck disable=SC2001
IPv4_ADDRESS="$(echo "$IPv4_ADDRESS" | sed 's|[0-9]\+$|0/16|')"
if [ -z "$APACHE_PORT" ]; then if [ -z "$APACHE_PORT" ]; then
export APACHE_PORT="443" export APACHE_PORT="443"
fi fi
@ -42,36 +35,20 @@ if [ "$APACHE_PORT" != '443' ]; then
else else
CADDYFILE="$(sed 's|auto_https.*|auto_https disable_redirects|' /Caddyfile)" CADDYFILE="$(sed 's|auto_https.*|auto_https disable_redirects|' /Caddyfile)"
fi fi
echo "$CADDYFILE" > /tmp/Caddyfile echo "$CADDYFILE" > /Caddyfile
# Change the trusted_proxies in case of reverse proxies # Change the trusted_proxies in case of reverse proxies
if [ "$APACHE_PORT" != '443' ]; then if [ "$APACHE_PORT" != '443' ]; then
# Here the 100.64.0.0/10 range gets added which is the CGNAT range used by Tailscale nodes CADDYFILE="$(sed 's|# trusted_proxies placeholder|trusted_proxies private_ranges|' /Caddyfile)"
# See https://github.com/nextcloud/all-in-one/pull/6703 for reference
CADDYFILE="$(sed 's|# trusted_proxies placeholder|trusted_proxies static private_ranges 100.64.0.0/10|' /tmp/Caddyfile)"
else else
CADDYFILE="$(sed "s|# trusted_proxies placeholder|trusted_proxies static $IPv4_ADDRESS|" /tmp/Caddyfile)" CADDYFILE="$(sed 's|trusted_proxies private_ranges|# trusted_proxies placeholder|' /Caddyfile)"
fi fi
echo "$CADDYFILE" > /tmp/Caddyfile echo "$CADDYFILE" > /Caddyfile
# Remove additional domain if not given
if [ -z "$ADDITIONAL_TRUSTED_DOMAIN" ]; then
CADDYFILE="$(sed '/ADDITIONAL_TRUSTED_DOMAIN/d' /tmp/Caddyfile)"
fi
echo "$CADDYFILE" > /tmp/Caddyfile
# Fix the Caddyfile format
caddy fmt --overwrite /tmp/Caddyfile
# Add caddy path # Add caddy path
mkdir -p /mnt/data/caddy/ mkdir -p /mnt/data/caddy/
# Fix caddy startup # Fix apache startup

if [ -d "/mnt/data/caddy/locks" ]; then rm -f /var/run/apache2/apache2.pid
rm -rf /mnt/data/caddy/locks/*
fi
# Fix apache startup exec "$@"
rm -f /usr/local/apache2/logs/httpd.pid
exec "$@"

View file

@ -1,23 +1,23 @@
[supervisord] [supervisord]
nodaemon=true nodaemon=true
nodaemon=true nodaemon=true
logfile=/var/log/supervisord/supervisord.log logfile=/var/log/supervisord/supervisord.log
pidfile=/var/run/supervisord/supervisord.pid pidfile=/var/run/supervisord/supervisord.pid
childlogdir=/var/log/supervisord/ childlogdir=/var/log/supervisord/
logfile_maxbytes=50MB logfile_maxbytes=50MB
logfile_backups=10 logfile_backups=10
loglevel=error loglevel=error
[program:apache] [program:apache]
# Stdout logging is disabled as otherwise the logs are spammed stdout_logfile=/dev/stdout
stdout_logfile=NONE stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0 stderr_logfile_maxbytes=0
command=apachectl -DFOREGROUND command=apachectl -DFOREGROUND
[program:caddy] [program:caddy]
stdout_logfile=/dev/stdout stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0 stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0 stderr_logfile_maxbytes=0
command=/usr/bin/caddy run --config /tmp/Caddyfile command=/usr/bin/caddy run -config /Caddyfile

View file

@ -1,29 +1,23 @@
# syntax=docker/dockerfile:latest FROM debian:bullseye-20220912-slim
FROM alpine:3.23.3
RUN set -ex; \ RUN set -ex; \
\ \
apk upgrade --no-cache -a; \ apt-get update; \
apk add --no-cache \ apt-get install -y --no-install-recommends \
util-linux-misc \
bash \
borgbackup \ borgbackup \
rsync \ rsync \
fuse \ fuse \
py3-llfuse \ python3-llfuse \
jq \ jq \
openssh-client ; \
rm -rf /var/lib/apt/lists/*
VOLUME /root VOLUME /root
COPY --chmod=770 *.sh / COPY start.sh /usr/bin/
COPY borg_excludes / COPY backupscript.sh /
RUN chmod +x /usr/bin/start.sh; \
chmod +x /backupscript.sh
ENTRYPOINT ["/start.sh"]
# hadolint ignore=DL3002
USER root USER root
ENTRYPOINT ["start.sh"]
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
ENV BORG_RETENTION_POLICY="--keep-within=7d --keep-weekly=4 --keep-monthly=6"
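A minimal sketch of how the retention string above turns into borg arguments; it mirrors the `read -ra BORG_PRUNE_OPTS` and `borg prune` calls in backupscript.sh further down and assumes BORG_REPO already points at an initialized repository:
BORG_RETENTION_POLICY="--keep-within=7d --keep-weekly=4 --keep-monthly=6"
read -ra BORG_PRUNE_OPTS <<< "$BORG_RETENTION_POLICY"   # split the string into separate flags
borg prune --stats --glob-archives '*_*-nextcloud-aio' "${BORG_PRUNE_OPTS[@]}"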

View file

@ -24,34 +24,22 @@ for directory in "${VOLUME_DIRS[@]}"; do
exit 1 exit 1
fi fi
done done
# Test if default volumes are there
DEFAULT_VOLUMES=(nextcloud_aio_apache nextcloud_aio_nextcloud nextcloud_aio_database nextcloud_aio_database_dump nextcloud_aio_elasticsearch nextcloud_aio_nextcloud_data nextcloud_aio_mastercontainer)
for volume in "${DEFAULT_VOLUMES[@]}"; do
if ! mountpoint -q "/nextcloud_aio_volumes/$volume"; then
echo "$volume is missing which is not intended."
exit 1
fi
done
# Check if target is mountpoint # Check if target is mountpoint
if [ -z "$BORG_REMOTE_REPO" ] && ! mountpoint -q "$MOUNT_DIR"; then if ! mountpoint -q /mnt/borgbackup; then
echo "$MOUNT_DIR is not a mountpoint which is not allowed." echo "/mnt/borgbackup is not a mountpoint which is not allowed"
exit 1 exit 1
fi fi
# Check if repo is uninitialized # Check if target is empty
if [ "$BORG_MODE" != backup ] && [ "$BORG_MODE" != test ] && ! borg info > /dev/null; then if [ "$BORG_MODE" != backup ] && [ "$BORG_MODE" != test ] && ! [ -f "$BORG_BACKUP_DIRECTORY/config" ]; then
if [ -n "$BORG_REMOTE_REPO" ]; then echo "The repository is empty. cannot perform check or restore."
echo "The repository is uninitialized or cannot connect to remote. Cannot perform check or restore."
else
echo "The repository is uninitialized. Cannot perform check or restore."
fi
exit 1 exit 1
fi fi
# Do not continue if this file exists (needed for simple external blocking) # Do not continue if this file exists (needed for simple external blocking)
if [ -z "$BORG_REMOTE_REPO" ] && [ -f "$BORG_BACKUP_DIRECTORY/aio-lockfile" ]; then if [ -f "$BORG_BACKUP_DIRECTORY/aio-lockfile" ]; then
echo "Not continuing because aio-lockfile exists it seems like a script is externally running which is locking the backup archive." echo "Not continuing because aio-lockfile exists - it seems like a script is externally running which is locking the backup archive."
echo "If this should not be the case, you can fix this by deleting the 'aio-lockfile' file from the backup archive directory." echo "If this should not be the case, you can fix this by deleting the 'aio-lockfile' file from the backup archive directory."
exit 1 exit 1
fi fi
@ -61,15 +49,6 @@ if [ "$BORG_MODE" = backup ] || [ "$BORG_MODE" = restore ]; then
touch "/nextcloud_aio_volumes/nextcloud_aio_database_dump/backup-is-running" touch "/nextcloud_aio_volumes/nextcloud_aio_database_dump/backup-is-running"
fi fi
if [ -n "$BORG_REMOTE_REPO" ] && ! [ -f "$BORGBACKUP_KEY" ]; then
echo "First run, creating borg ssh key"
ssh-keygen -f "$BORGBACKUP_KEY" -N ""
echo "You should configure the remote to accept this public key"
fi
if [ -n "$BORG_REMOTE_REPO" ] && [ -f "$BORGBACKUP_KEY.pub" ]; then
echo "Your public ssh key for borgbackup is: $(cat "$BORGBACKUP_KEY.pub")"
fi
# Do the backup # Do the backup
if [ "$BORG_MODE" = backup ]; then if [ "$BORG_MODE" = backup ]; then
@ -78,97 +57,64 @@ if [ "$BORG_MODE" = backup ]; then
echo "configuration.json not present. Cannot perform the backup!" echo "configuration.json not present. Cannot perform the backup!"
exit 1 exit 1
elif ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud/config/config.php" ]; then elif ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud/config/config.php" ]; then
echo "config.php is missing. Cannot perform backup!" echo "config.php is missing cannot perform backup"
exit 1 exit 1
elif ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_database_dump/database-dump.sql" ]; then elif ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_database_dump/database-dump.sql" ]; then
echo "database-dump is missing. Cannot perform backup!" echo "database-dump is missing. cannot perform backup"
echo "Please check the database container logs!"
exit 1
elif ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/.ocdata" ] && ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/.ncdata" ]; then
echo "The .ncdata or .ocdata file is missing in Nextcloud datadir which means it is invalid!"
echo "Is the drive where the datadir is located on still mounted?"
exit 1 exit 1
fi fi
# Test that default volumes are not empty # Test that nothing is empty
for volume in "${DEFAULT_VOLUMES[@]}"; do for directory in "${VOLUME_DIRS[@]}"; do
if [ -z "$(ls -A "/nextcloud_aio_volumes/$volume")" ] && [ "$volume" != "nextcloud_aio_elasticsearch" ]; then if [ -z "$(ls -A "$directory")" ]; then
echo "/nextcloud_aio_volumes/$volume is empty which should not happen!" echo "$directory is empty which is not allowed."
exit 1 exit 1
fi fi
done done
if [ -f "/nextcloud_aio_volumes/nextcloud_aio_database_dump/export.failed" ]; then if [ -f "/nextcloud_aio_volumes/nextcloud_aio_database_dump/export.failed" ]; then
echo "Database export failed the last time. Most likely was the export time not high enough."
echo "Cannot create a backup now." echo "Cannot create a backup now."
echo "Reason is that the database export failed the last time." echo "Please report this to https://github.com/nextcloud/all-in-one/issues. Thanks!"
echo "Most likely was the database container not correctly shut down via the AIO interface."
echo ""
echo "You might want to try the database export again manually by running the three commands:"
echo "sudo docker start nextcloud-aio-database"
echo "sleep 10"
echo "sudo docker stop nextcloud-aio-database -t 1800"
echo ""
echo "Afterwards try to create a backup again and it should hopefully work."
echo "If it should still fail, feel free to report this to https://github.com/nextcloud/all-in-one/issues and post the database container logs and the borgbackup container logs into the thread. Thanks!"
exit 1 exit 1
fi fi
if [ -z "$BORG_REMOTE_REPO" ]; then # Create backup folder
# Create backup folder mkdir -p "$BORG_BACKUP_DIRECTORY"
mkdir -p "$BORG_BACKUP_DIRECTORY"
fi
# Initialize the repository if can't get info from target # Initialize the repository if the target is empty
if ! borg info > /dev/null; then if ! [ -f "$BORG_BACKUP_DIRECTORY/config" ]; then
# Don't initialize if already initialized # Don't initialize if already initialized
if [ -f "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config" ]; then if [ -f "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config" ]; then
if [ -n "$BORG_REMOTE_REPO" ]; then echo "Cannot initialize a new repository as that was already done at least one time."
echo "Borg could not get info from the remote repo." echo "If you still want to do so, you may delete the 'borg.config' file that is stored in the mastercontainer volume manually, which will allow you to initialize a new borg repository in the chosen directory."
echo "This might be a failure to connect to the remote server. See the above borg info output for details."
else
echo "Borg could not get info from the targeted directory."
echo "This might happen if the targeted directory is located on an external drive and the drive not connected anymore. You should check this."
fi
echo "If you instead want to initialize a new backup repository, you may delete the 'borg.config' file that is stored in the mastercontainer volume manually, which will allow you to initialize a new borg repository in the chosen directory:"
echo "sudo docker exec nextcloud-aio-mastercontainer rm /mnt/docker-aio-config/data/borg.config"
exit 1 exit 1
fi fi
echo "Initializing repository..." echo "initializing repository..."
NEW_REPOSITORY=1 NEW_REPOSITORY=1
if ! borg init --debug --encryption=repokey-blake2; then if ! borg init --debug --encryption=repokey-blake2 "$BORG_BACKUP_DIRECTORY"; then
echo "Could not initialize borg repository." echo "Could not initialize borg repository."
rm -f "$BORG_BACKUP_DIRECTORY/config"
exit 1 exit 1
fi fi
borg config "$BORG_BACKUP_DIRECTORY" additional_free_space 2G
if [ -z "$BORG_REMOTE_REPO" ]; then # Fix too large Borg cache
# borg config only works for local repos; it's up to the remote to ensure the disk isn't full # https://borgbackup.readthedocs.io/en/stable/faq.html#the-borg-cache-eats-way-too-much-disk-space-what-can-i-do
borg config :: additional_free_space 2G BORG_ID="$(borg config "$BORG_BACKUP_DIRECTORY" id)"
rm -r "/root/.cache/borg/$BORG_ID/chunks.archive.d"
touch "/root/.cache/borg/$BORG_ID/chunks.archive.d"
# Fix too large Borg cache # Make a backup from the borg config file
# https://borgbackup.readthedocs.io/en/stable/faq.html#the-borg-cache-eats-way-too-much-disk-space-what-can-i-do if ! [ -f "$BORG_BACKUP_DIRECTORY/config" ]; then
BORG_ID="$(borg config :: id)" echo "The borg config file wasn't created. Something is wrong."
rm -r "/root/.cache/borg/$BORG_ID/chunks.archive.d"
touch "/root/.cache/borg/$BORG_ID/chunks.archive.d"
fi
if ! borg info > /dev/null; then
echo "Borg can't get info from the repo it created. Something is wrong."
exit 1 exit 1
fi fi
rm -f "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config" rm -f "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config"
if [ -n "$BORG_REMOTE_REPO" ]; then if ! cp "$BORG_BACKUP_DIRECTORY/config" "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config"; then
# `borg config` does not support remote repos so instead create a dummy file and rely on the remote to avoid echo "Could not copy config file to second place. Cannot perform backup."
# corruption of the config file (which contains the encryption key). We don't actually use the contents of exit 1
# this file anywhere, so a touch is all we need to remember we already initialized the repo.
touch "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config"
else
# Make a backup from the borg config file
if ! cp "$BORG_BACKUP_DIRECTORY/config" "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config"; then
echo "Could not copy config file to second place. Cannot perform backup."
exit 1
fi
fi fi
echo "Repository successfully initialized." echo "Repository successfully initialized."
@ -180,43 +126,15 @@ if [ "$BORG_MODE" = backup ]; then
# Borg options # Borg options
# auto,zstd compression seems to have the best ratio based on: # auto,zstd compression seems to have the best ratio based on:
# https://forum.level1techs.com/t/optimal-compression-for-borg-backups/145870/6 # https://forum.level1techs.com/t/optimal-compression-for-borg-backups/145870/6
BORG_OPTS=(-v --stats --compression "auto,zstd") BORG_OPTS=(--stats --compression "auto,zstd" --exclude-caches --checkpoint-interval 86400)
if [ "$NEW_REPOSITORY" = 1 ]; then
BORG_OPTS+=(--progress)
fi
# Exclude the nextcloud log and audit log for GDPR reasons
BORG_EXCLUDE=(--exclude "/nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/nextcloud.log*" --exclude "/nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/audit.log" --exclude "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/lost+found")
BORG_INCLUDE=()
# Exclude datadir if .noaiobackup file was found
# shellcheck disable=SC2144
if [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/.noaiobackup" ]; then
BORG_EXCLUDE+=(--exclude "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/")
BORG_INCLUDE+=(--pattern="+/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/.noaiobackup")
echo "⚠️⚠️⚠️ '.noaiobackup' file was found in Nextcloud's data directory. Excluding the data directory from backup!"
# Exclude preview folder if .noaiobackup file was found
elif [ -f /nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_*/preview/.noaiobackup ]; then
BORG_EXCLUDE+=(--exclude "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_*/preview/")
BORG_INCLUDE+=(--pattern="+/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_*/preview/.noaiobackup")
echo "⚠️⚠️⚠️ '.noaiobackup' file was found in the preview directory. Excluding the preview directory from backup!"
fi
# Make sure that there is always a borg.config file before creating a new backup
if ! [ -f "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config" ]; then
echo "Did not find borg.config file in the mastercontainer volume."
echo "Cannot create a backup as this is wrong."
exit 1
fi
# Create the backup # Create the backup
echo "Starting the backup..." echo "Starting the backup..."
get_start_time get_start_time
if ! borg create "${BORG_OPTS[@]}" "${BORG_INCLUDE[@]}" "${BORG_EXCLUDE[@]}" "::$CURRENT_DATE-nextcloud-aio" "/nextcloud_aio_volumes/" --exclude-from /borg_excludes; then if ! borg create "${BORG_OPTS[@]}" "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-nextcloud-aio" "/nextcloud_aio_volumes/"; then
echo "Deleting the failed backup archive..." echo "Deleting the failed backup archive..."
borg delete --stats "::$CURRENT_DATE-nextcloud-aio" borg delete --stats "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-nextcloud-aio"
echo "Backup failed!" echo "Backup failed!"
echo "You might want to check the backup integrity via the AIO interface."
if [ "$NEW_REPOSITORY" = 1 ]; then if [ "$NEW_REPOSITORY" = 1 ]; then
echo "Deleting borg.config file so that you can choose a different location for the backup." echo "Deleting borg.config file so that you can choose a different location for the backup."
rm "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config" rm "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/borg.config"
@ -228,23 +146,15 @@ if [ "$BORG_MODE" = backup ]; then
rm -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/skip.update" rm -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/skip.update"
# Prune options # Prune options
read -ra BORG_PRUNE_OPTS <<< "$BORG_RETENTION_POLICY" BORG_PRUNE_OPTS=(--stats --keep-within=7d --keep-weekly=4 --keep-monthly=6 "$BORG_BACKUP_DIRECTORY")
echo "BORG_PRUNE_OPTS are ${BORG_PRUNE_OPTS[*]}"
# Prune archives # Prune archives
echo "Pruning the archives..." echo "Pruning the archives..."
if ! borg prune --stats --glob-archives '*_*-nextcloud-aio' "${BORG_PRUNE_OPTS[@]}"; then if ! borg prune --prefix '*_*-nextcloud-aio' "${BORG_PRUNE_OPTS[@]}"; then
echo "Failed to prune archives!" echo "Failed to prune archives!"
exit 1 exit 1
fi fi
# Compact archives
echo "Compacting the archives..."
if ! borg compact; then
echo "Failed to compact archives!"
exit 1
fi
# Back up additional directories of the host # Back up additional directories of the host
if [ "$ADDITIONAL_DIRECTORIES_BACKUP" = 'yes' ]; then if [ "$ADDITIONAL_DIRECTORIES_BACKUP" = 'yes' ]; then
if [ -d "/docker_volumes/" ]; then if [ -d "/docker_volumes/" ]; then
@ -256,23 +166,17 @@ if [ "$BORG_MODE" = backup ]; then
exit 1 exit 1
fi fi
done done
echo "Starting the backup for additional volumes..." if ! borg create "${BORG_OPTS[@]}" "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-additional-docker-volumes" "/docker_volumes/"; then
if ! borg create "${BORG_OPTS[@]}" "::$CURRENT_DATE-additional-docker-volumes" "/docker_volumes/"; then
echo "Deleting the failed backup archive..." echo "Deleting the failed backup archive..."
borg delete --stats "::$CURRENT_DATE-additional-docker-volumes" borg delete --stats "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-additional-docker-volumes"
echo "Backup of additional docker-volumes failed!" echo "Backup of additional docker-volumes failed!"
exit 1 exit 1
fi fi
echo "Pruning additional volumes..."
if ! borg prune --stats --glob-archives '*_*-additional-docker-volumes' "${BORG_PRUNE_OPTS[@]}"; then if ! borg prune --prefix '*_*-additional-docker-volumes' "${BORG_PRUNE_OPTS[@]}"; then
echo "Failed to prune additional docker-volumes archives!" echo "Failed to prune additional docker-volumes archives!"
exit 1 exit 1
fi fi
echo "Compacting additional volumes..."
if ! borg compact; then
echo "Failed to compact additional docker-volume archives!"
exit 1
fi
fi fi
if [ -d "/host_mounts/" ]; then if [ -d "/host_mounts/" ]; then
EXCLUDED_DIRECTORIES=(home/*/.cache root/.cache var/cache lost+found run var/run dev tmp sys proc) EXCLUDED_DIRECTORIES=(home/*/.cache root/.cache var/cache lost+found run var/run dev tmp sys proc)
@ -286,29 +190,22 @@ if [ "$BORG_MODE" = backup ]; then
do do
EXCLUDE_DIRS+=(--exclude "/host_mounts/$directory/") EXCLUDE_DIRS+=(--exclude "/host_mounts/$directory/")
done done
echo "Starting the backup for additional host mounts..." if ! borg create "${BORG_OPTS[@]}" "${EXCLUDE_DIRS[@]}" "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-additional-host-mounts" "/host_mounts/"; then
if ! borg create "${BORG_OPTS[@]}" "${EXCLUDE_DIRS[@]}" "::$CURRENT_DATE-additional-host-mounts" "/host_mounts/"; then
echo "Deleting the failed backup archive..." echo "Deleting the failed backup archive..."
borg delete --stats "::$CURRENT_DATE-additional-host-mounts" borg delete --stats "$BORG_BACKUP_DIRECTORY::$CURRENT_DATE-additional-host-mounts"
echo "Backup of additional host-mounts failed!" echo "Backup of additional host-mounts failed!"
exit 1 exit 1
fi fi
echo "Pruning additional host mounts..." if ! borg prune --prefix '*_*-additional-host-mounts' "${BORG_PRUNE_OPTS[@]}"; then
if ! borg prune --stats --glob-archives '*_*-additional-host-mounts' "${BORG_PRUNE_OPTS[@]}"; then
echo "Failed to prune additional host-mount archives!" echo "Failed to prune additional host-mount archives!"
exit 1 exit 1
fi fi
echo "Compacting additional host mounts..."
if ! borg compact; then
echo "Failed to compact additional host-mount archives!"
exit 1
fi
fi fi
fi fi
# Inform user # Inform user
get_expiration_time get_expiration_time
echo "Backup finished successfully on $END_DATE_READABLE ($DURATION_READABLE)." echo "Backup finished successfully on $END_DATE_READABLE ($DURATION_READABLE)"
if [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/update.failed" ]; then if [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/update.failed" ]; then
echo "However a Nextcloud update failed. So reporting that the backup failed which will skip any update attempt the next time." echo "However a Nextcloud update failed. So reporting that the backup failed which will skip any update attempt the next time."
echo "Please restore a backup from before the failed Nextcloud update attempt." echo "Please restore a backup from before the failed Nextcloud update attempt."
@ -321,38 +218,17 @@ fi
if [ "$BORG_MODE" = restore ]; then if [ "$BORG_MODE" = restore ]; then
get_start_time get_start_time
# Pick archive to restore # Perform the restore
if [ -n "$SELECTED_RESTORE_TIME" ]; then if [ -n "$SELECTED_RESTORE_TIME" ]; then
SELECTED_ARCHIVE="$(borg list | grep "nextcloud-aio" | grep "$SELECTED_RESTORE_TIME" | awk -F " " '{print $1}' | head -1)" SELECTED_ARCHIVE="$(borg list "$BORG_BACKUP_DIRECTORY" | grep "nextcloud-aio" | grep "$SELECTED_RESTORE_TIME" | awk -F " " '{print $1}' | head -1)"
else else
SELECTED_ARCHIVE="$(borg list | grep "nextcloud-aio" | awk -F " " '{print $1}' | sort -r | head -1)" SELECTED_ARCHIVE="$(borg list "$BORG_BACKUP_DIRECTORY" | grep "nextcloud-aio" | awk -F " " '{print $1}' | sort -r | head -1)"
fi fi
echo "Restoring '$SELECTED_ARCHIVE'..." echo "Restoring '$SELECTED_ARCHIVE'..."
mkdir -p /tmp/borg
ADDITIONAL_RSYNC_EXCLUDES=() if ! borg mount "$BORG_BACKUP_DIRECTORY::$SELECTED_ARCHIVE" /tmp/borg; then
ADDITIONAL_BORG_EXCLUDES=() echo "Could not mount the backup!"
ADDITIONAL_FIND_EXCLUDES=() exit 1
# Exclude datadir if .noaiobackup file was found
# shellcheck disable=SC2144
if [ -f "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/.noaiobackup" ]; then
# Keep these 3 in sync. Beware, the pattern syntax and the paths differ
ADDITIONAL_RSYNC_EXCLUDES=(--exclude "nextcloud_aio_nextcloud_data/**")
ADDITIONAL_BORG_EXCLUDES=(--exclude "sh:nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/**")
ADDITIONAL_FIND_EXCLUDES=(-o -regex 'nextcloud_aio_volumes/nextcloud_aio_nextcloud_data\(/.*\)?')
echo "⚠️⚠️⚠️ '.noaiobackup' file was found in Nextcloud's data directory. Excluding the data directory from restore!"
echo "You might run into problems due to this afterwards as potentially this makes the directory go out of sync with the database."
echo "You might be able to fix this by running 'occ files:scan --all' and 'occ maintenance:repair' and 'occ files:scan-app-data' after the restore."
echo "See https://github.com/nextcloud/all-in-one#how-to-run-occ-commands"
# Exclude previews from restore if selected to speed up process or exclude preview folder if .noaiobackup file was found
elif [ -n "$RESTORE_EXCLUDE_PREVIEWS" ] || [ -f /nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_*/preview/.noaiobackup ]; then
# Keep these 3 in sync. Beware, the pattern syntax and the paths differ
ADDITIONAL_RSYNC_EXCLUDES=(--exclude "nextcloud_aio_nextcloud_data/appdata_*/preview/**")
ADDITIONAL_BORG_EXCLUDES=(--exclude "sh:nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_*/preview/**")
ADDITIONAL_FIND_EXCLUDES=(-o -regex 'nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/appdata_[^/]*/preview\(/.*\)?')
echo "⚠️⚠️⚠️ Excluding previews from restore!"
echo "You might run into problems due to this afterwards as potentially this makes the directory go out of sync with the database."
echo "You might be able to fix this by running 'occ files:scan-app-data preview' after the restore."
echo "See https://github.com/nextcloud/all-in-one#how-to-run-occ-commands"
fi fi
# Save Additional Backup dirs # Save Additional Backup dirs
@ -365,12 +241,23 @@ if [ "$BORG_MODE" = restore ]; then
DAILY_BACKUPTIME="$(cat /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_time)" DAILY_BACKUPTIME="$(cat /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_time)"
fi fi
# Restore everything except the configuration file
if ! rsync --stats --archive --human-readable -vv --delete \
--exclude "nextcloud_aio_mastercontainer/session/"** \
--exclude "nextcloud_aio_mastercontainer/certs/"** \
--exclude "nextcloud_aio_mastercontainer/data/daily_backup_running" \
--exclude "nextcloud_aio_mastercontainer/data/configuration.json" \
/tmp/borg/nextcloud_aio_volumes/ /nextcloud_aio_volumes; then
echo "Something failed while restoring from backup."
umount /tmp/borg
exit 1
fi
# Save current aio password # Save current aio password
AIO_PASSWORD="$(jq '.password' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)" AIO_PASSWORD="$(jq '.password' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
# Save current backup location vars # Save current path
BORG_LOCATION="$(jq '.borg_backup_host_location' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)" BORG_LOCATION="$(jq '.borg_backup_host_location' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
REMOTE_REPO="$(jq '.borg_remote_repo' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
# Save current nextcloud datadir # Save current nextcloud datadir
if grep -q '"nextcloud_datadir":' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json; then if grep -q '"nextcloud_datadir":' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json; then
@ -379,116 +266,22 @@ if [ "$BORG_MODE" = restore ]; then
NEXTCLOUD_DATADIR='""' NEXTCLOUD_DATADIR='""'
fi fi
if [ -z "$BORG_REMOTE_REPO" ]; then # Restore the configuration file
mkdir -p /tmp/borg if ! rsync --archive --human-readable -vv \
if ! borg mount "::$SELECTED_ARCHIVE" /tmp/borg; then /tmp/borg/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json \
echo "Could not mount the backup!" /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json; then
exit 1 echo "Something failed while restoring the configuration.json."
fi umount /tmp/borg
exit 1
# Restore everything except the configuration file
#
# These exclude patterns need to be kept in sync with the borg_excludes file and the find excludes in this file,
# which use a different syntax (patterns appear in 3 places in total)
if ! rsync --stats --archive --human-readable -vv --delete \
--exclude "nextcloud_aio_apache/caddy/**" \
--exclude "nextcloud_aio_mastercontainer/caddy/**" \
--exclude "nextcloud_aio_nextcloud/data/nextcloud.log*" \
--exclude "nextcloud_aio_nextcloud/data/audit.log" \
--exclude "nextcloud_aio_mastercontainer/certs/**" \
--exclude "nextcloud_aio_mastercontainer/data/configuration.json" \
--exclude "nextcloud_aio_mastercontainer/data/daily_backup_running" \
--exclude "nextcloud_aio_mastercontainer/data/session_date_file" \
--exclude "nextcloud_aio_mastercontainer/session/**" \
--exclude "nextcloud_aio_nextcloud_data/lost+found" \
"${ADDITIONAL_RSYNC_EXCLUDES[@]}" \
/tmp/borg/nextcloud_aio_volumes/ /nextcloud_aio_volumes/; then
RESTORE_FAILED=1
echo "Something failed while restoring from backup."
fi
# Restore the configuration file
if ! rsync --archive --human-readable -vv \
/tmp/borg/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json \
/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json; then
RESTORE_FAILED=1
echo "Something failed while restoring the configuration.json."
fi
if ! umount /tmp/borg; then
echo "Failed to unmount the borg archive but should still be able to restore successfully"
fi
else
# Restore nearly everything
#
# borg mount is really slow for remote repos (did not check whether it's slow for local repos too),
# using extract to /tmp would require temporarily storing a second copy of the data.
# So instead extract directly on top of the destination with exclude patterns for the config, but
# then we do still need to delete local files which are not present in the archive.
#
# Older backups may still contain files we've since excluded, so we have to exclude on extract as well.
cd / # borg extract has no destination arg and extracts to CWD
if ! borg extract "::$SELECTED_ARCHIVE" --progress --exclude-from /borg_excludes "${ADDITIONAL_BORG_EXCLUDES[@]}" --pattern '+nextcloud_aio_volumes/**'
then
RESTORE_FAILED=1
echo "Failed to extract backup archive."
else
# Delete files/dirs present locally, but not in the backup archive, excluding conf files
# https://unix.stackexchange.com/a/759341
# This comm does not support -z, but I doubt any file names would have \n in them
#
# These find patterns need to be kept in sync with the borg_excludes file and the rsync excludes in this
# file, which use a different syntax (patterns appear in 3 places in total)
echo "Deleting local files which do not exist in the backup"
if ! find nextcloud_aio_volumes \
-not \( \
-path nextcloud_aio_volumes/nextcloud_aio_apache/caddy \
-o -path "nextcloud_aio_volumes/nextcloud_aio_apache/caddy/*" \
-o -path nextcloud_aio_volumes/nextcloud_aio_mastercontainer/caddy \
-o -path "nextcloud_aio_volumes/nextcloud_aio_mastercontainer/caddy/*" \
-o -path nextcloud_aio_volumes/nextcloud_aio_mastercontainer/certs \
-o -path "nextcloud_aio_volumes/nextcloud_aio_mastercontainer/certs/*" \
-o -path nextcloud_aio_volumes/nextcloud_aio_mastercontainer/session \
-o -path "nextcloud_aio_volumes/nextcloud_aio_mastercontainer/session/*" \
-o -path "nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/nextcloud.log*" \
-o -path nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/audit.log \
-o -path nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_running \
-o -path nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/session_date_file \
-o -path "nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/id_borg*" \
-o -path "nextcloud_aio_nextcloud_data/lost+found" \
"${ADDITIONAL_FIND_EXCLUDES[@]}" \
\) \
| LC_ALL=C sort \
| LC_ALL=C comm -23 - \
<(borg list "::$SELECTED_ARCHIVE" --short --exclude-from /borg_excludes --pattern '+nextcloud_aio_volumes/**' | LC_ALL=C sort) \
> /tmp/local_files_not_in_backup
then
RESTORE_FAILED=1
echo "Failed to delete local files not in backup archive."
else
# More robust than e.g. xargs, which ran into an "argument list too long" error during testing, but it's slower
# https://stackoverflow.com/a/21848934
while IFS= read -r file
do rm -vrf -- "$file" || DELETE_FAILED=1
done < /tmp/local_files_not_in_backup
if [ "$DELETE_FAILED" = 1 ]; then
RESTORE_FAILED=1
echo "Failed to delete (some) local files not in backup archive."
fi
fi
fi
fi fi
# Set backup-mode to restore since it was a restore # Set backup-mode to restore since it was a restore
CONTENTS="$(jq '."backup-mode" = "restore"' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)" CONTENTS="$(jq '."backup-mode" = "restore"' /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
echo -E "${CONTENTS}" > /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json echo -E "${CONTENTS}" > /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json
# Reset the backup location vars to the currently used one # Reset the backup path to the currently used one
CONTENTS="$(jq ".borg_backup_host_location = $BORG_LOCATION" /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)" CONTENTS="$(jq ".borg_backup_host_location = $BORG_LOCATION" /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
echo -E "${CONTENTS}" > /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json echo -E "${CONTENTS}" > /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json
CONTENTS="$(jq ".borg_remote_repo = $REMOTE_REPO" /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
echo -E "${CONTENTS}" > /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json
# Reset the AIO password to the currently used one # Reset the AIO password to the currently used one
CONTENTS="$(jq ".password = $AIO_PASSWORD" /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)" CONTENTS="$(jq ".password = $AIO_PASSWORD" /nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/configuration.json)"
@ -512,13 +305,11 @@ if [ "$BORG_MODE" = restore ]; then
chmod 770 "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_time" chmod 770 "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_time"
fi fi
if [ "$RESTORE_FAILED" = 1 ]; then umount /tmp/borg
exit 1
fi
# Inform user # Inform user
get_expiration_time get_expiration_time
echo "Restore finished successfully on $END_DATE_READABLE ($DURATION_READABLE)." echo "Restore finished successfully on $END_DATE_READABLE ($DURATION_READABLE)"
# Add file to Nextcloud container so that it skips any update the next time # Add file to Nextcloud container so that it skips any update the next time
touch "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/skip.update" touch "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/skip.update"
@ -527,15 +318,6 @@ if [ "$BORG_MODE" = restore ]; then
# Add file to Nextcloud container so that it performs a fingerprint update the next time # Add file to Nextcloud container so that it performs a fingerprint update the next time
touch "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/fingerprint.update" touch "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/fingerprint.update"
chmod 777 "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/fingerprint.update" chmod 777 "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/fingerprint.update"
# Add file to Nextcloud container to trigger a preview scan the next time it starts
if [ -n "$RESTORE_EXCLUDE_PREVIEWS" ]; then
touch "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/trigger-preview.scan"
chmod 777 "/nextcloud_aio_volumes/nextcloud_aio_nextcloud_data/trigger-preview.scan"
fi
# Delete redis cache
rm -f "/mnt/redis/dump.rdb"
fi fi
# Do the Backup check # Do the Backup check
@ -544,80 +326,37 @@ if [ "$BORG_MODE" = check ]; then
echo "Checking the backup integrity..." echo "Checking the backup integrity..."
# Perform the check # Perform the check
if ! borg check -v --verify-data; then if ! borg check --verify-data "$BORG_BACKUP_DIRECTORY"; then
echo "Some errors were found while checking the backup integrity!" echo "Some errors were found while checking the backup integrity!"
echo "Check the AIO interface for advice on how to proceed now!"
exit 1 exit 1
fi fi
# Inform user # Inform user
get_expiration_time get_expiration_time
echo "Check finished successfully on $END_DATE_READABLE ($DURATION_READABLE)." echo "Check finished successfully on $END_DATE_READABLE ($DURATION_READABLE)"
exit 0
fi
# Do the Backup check-repair
if [ "$BORG_MODE" = "check-repair" ]; then
get_start_time
echo "Checking the backup integrity and repairing it..."
# Perform the check-repair
if ! echo YES | borg check -v --repair; then
echo "Some errors were found while checking and repairing the backup integrity!"
exit 1
fi
# Inform user
get_expiration_time
echo "Check finished successfully on $END_DATE_READABLE ($DURATION_READABLE)."
exit 0 exit 0
fi fi
# Do the backup test # Do the backup test
if [ "$BORG_MODE" = test ]; then if [ "$BORG_MODE" = test ]; then
if [ -n "$BORG_REMOTE_REPO" ]; then if ! [ -d "$BORG_BACKUP_DIRECTORY" ]; then
if ! borg info > /dev/null; then echo "No 'borg' directory in the given backup directory found!"
echo "Borg could not get info from the remote repo." echo "Only the files/folders below have been found in the given directory."
echo "See the above borg info output for details." ls -a "$MOUNT_DIR"
exit 1 echo "Please adjust the directory so that the borg archive is positioned in a folder named 'borg' inside the given directory!"
fi exit 1
else elif ! [ -f "$BORG_BACKUP_DIRECTORY/config" ]; then
if ! [ -d "$BORG_BACKUP_DIRECTORY" ]; then echo "A 'borg' directory was found but could not find the borg archive."
echo "No 'borg' directory in the given backup directory found!" echo "Only the files/folders below have been found in the borg directory."
echo "Only the files/folders below have been found in the given directory." ls -a "$BORG_BACKUP_DIRECTORY"
ls -a "$MOUNT_DIR" echo "The archive and most importantly the config file must be positioned directly in the 'borg' subfolder."
echo "Please adjust the directory so that the borg archive is positioned in a folder named 'borg' inside the given directory!" exit 1
exit 1 elif ! borg list "$BORG_BACKUP_DIRECTORY"; then
elif ! [ -f "$BORG_BACKUP_DIRECTORY/config" ]; then
echo "A 'borg' directory was found but could not find the borg archive."
echo "Only the files/folders below have been found in the borg directory."
ls -a "$BORG_BACKUP_DIRECTORY"
echo "The archive and most importantly the config file must be positioned directly in the 'borg' subfolder."
exit 1
fi
fi
if ! borg list >/dev/null; then
echo "The entered path seems to be valid but could not open the backup archive." echo "The entered path seems to be valid but could not open the backup archive."
echo "Most likely the entered password was wrong so please adjust it accordingly!" echo "Most likely the entered password was wrong so please adjust it accordingly!"
exit 1 exit 1
else else
if ! borg list | grep "nextcloud-aio"; then echo "Everything looks fine so feel free to continue!"
echo "The backup archive does not contain a valid Nextcloud AIO backup." exit 0
echo "Most likely was the archive not created via Nextcloud AIO."
exit 1
else
echo "Everything looks fine so feel free to continue!"
exit 0
fi
fi fi
fi fi
if [ "$BORG_MODE" = list ]; then
echo "Updating backup list..."
if ! borg info > /dev/null; then
echo "Could not update the backup list."
exit 1
fi
# The update gets done automatically in the wrapper start.sh script.
fi

View file

@ -1,11 +0,0 @@
# These patterns need to be kept in sync with rsync and find excludes in backupscript.sh,
# which use a different syntax (patterns appear in 3 places in total)
nextcloud_aio_volumes/nextcloud_aio_apache/caddy/
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/caddy/
nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/nextcloud.log*
nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/audit.log
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/certs/
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_running
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/session_date_file
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/session/
nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/id_borg*

View file

@ -2,7 +2,7 @@
# Variables # Variables
export MOUNT_DIR="/mnt/borgbackup" export MOUNT_DIR="/mnt/borgbackup"
export BORG_BACKUP_DIRECTORY="$MOUNT_DIR/borg" # necessary even when remote to store the aio-lockfile export BORG_BACKUP_DIRECTORY="$MOUNT_DIR/borg"
# Validate BORG_PASSWORD # Validate BORG_PASSWORD
if [ -z "$BORG_PASSWORD" ] && [ -z "$BACKUP_RESTORE_PASSWORD" ]; then if [ -z "$BORG_PASSWORD" ] && [ -z "$BACKUP_RESTORE_PASSWORD" ]; then
@ -18,22 +18,10 @@ else
fi fi
export BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes export BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes
export BORG_RELOCATED_REPO_ACCESS_IS_OK=yes export BORG_RELOCATED_REPO_ACCESS_IS_OK=yes
if [ -n "$BORG_REMOTE_REPO" ]; then
export BORG_REPO="$BORG_REMOTE_REPO"
# Location to create the borg ssh pub/priv key
export BORGBACKUP_KEY="/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/id_borg"
# Accept any host key the first time connecting to the remote. Strictly speaking this should be provided by the user, but you'd
# have to be very unlucky to get MitM'ed on your first connection.
export BORG_RSH="ssh -o StrictHostKeyChecking=accept-new -i $BORGBACKUP_KEY"
else
export BORG_REPO="$BORG_BACKUP_DIRECTORY"
fi
# Validate BORG_MODE # Validate BORG_MODE
if [ "$BORG_MODE" != backup ] && [ "$BORG_MODE" != restore ] && [ "$BORG_MODE" != check ] && [ "$BORG_MODE" != "check-repair" ] && [ "$BORG_MODE" != "test" ] && [ "$BORG_MODE" != "list" ]; then if [ "$BORG_MODE" != backup ] && [ "$BORG_MODE" != restore ] && [ "$BORG_MODE" != check ] && [ "$BORG_MODE" != test ]; then
echo "No correct BORG_MODE mode applied. Valid are 'backup', 'check', 'restore', 'test' and 'list'." echo "No correct BORG_MODE mode applied. Valid are 'backup', 'check', 'restore' and 'test'."
exit 1 exit 1
fi fi
@ -48,8 +36,8 @@ fi
rm -f "/nextcloud_aio_volumes/nextcloud_aio_database_dump/backup-is-running" rm -f "/nextcloud_aio_volumes/nextcloud_aio_database_dump/backup-is-running"
# Get a list of all available borg archives # Get a list of all available borg archives
if borg list &>/dev/null; then if borg list "$BORG_BACKUP_DIRECTORY" &>/dev/null; then
borg list | grep "nextcloud-aio" | awk -F " " '{print $1","$3,$4}' > "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/backup_archives.list" borg list "$BORG_BACKUP_DIRECTORY" | grep "nextcloud-aio" | awk -F " " '{print $1","$3,$4}' > "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/backup_archives.list"
else else
echo "" > "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/backup_archives.list" echo "" > "/nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/backup_archives.list"
fi fi
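When BORG_REMOTE_REPO is used, the remote host still has to accept the key generated at $BORGBACKUP_KEY; a hedged sketch of a restricted authorized_keys entry on that host (the repository path /backups/aio and the key type are assumptions):
echo 'command="borg serve --restrict-to-path /backups/aio",restrict ssh-ed25519 AAAA...yourkey...' >> ~/.ssh/authorized_keys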

View file

@ -1,38 +1,6 @@
# syntax=docker/dockerfile:latest # Probably from this file: https://github.com/Cisco-Talos/clamav/blob/main/Dockerfile
FROM alpine:3.23.3 FROM clamav/clamav:0.105.1
RUN set -ex; \ RUN apk add --update --no-cache tzdata
apk upgrade --no-cache -a; \ COPY clamav.conf /tmp/
apk add --no-cache tzdata clamav clamav-milter supervisor bash; \ RUN cat /tmp/clamav.conf >> /etc/clamav/clamd.conf
mkdir -p /tmp /var/lib/clamav /run/clamav /var/log/supervisord /var/run/supervisord; \
chmod 777 -R /tmp /run/clamav /var/log/clamav /var/log/supervisord /var/run/supervisord; \
chown -R 100:100 /var/lib/clamav; \
sed -i "s|#\?MaxDirectoryRecursion.*|MaxDirectoryRecursion 30|g" /etc/clamav/clamd.conf; \
sed -i "s|#\?MaxScanSize.*|MaxScanSize 2000M|g" /etc/clamav/clamd.conf; \
sed -i "s|#\?MaxFileSize.*|MaxFileSize 2000M|g" /etc/clamav/clamd.conf; \
sed -i "s|#\?PCREMaxFileSize.*|PCREMaxFileSize 2000M|g" /etc/clamav/clamd.conf; \
# StreamMaxLength must be synced with av_stream_max_length inside the Nextcloud files_antivirus plugin
sed -i "s|#\?StreamMaxLength.*|StreamMaxLength 2000M|g" /etc/clamav/clamd.conf; \
sed -i "s|#\?TCPSocket|TCPSocket|g" /etc/clamav/clamd.conf; \
sed -i "s|^LocalSocket .*|LocalSocket /tmp/clamd.sock|g" /etc/clamav/clamd.conf; \
sed -i "s|Example| |g" /etc/clamav/clamav-milter.conf; \
sed -i "s|#\?MilterSocket inet:7357|MilterSocket inet:7357|g" /etc/clamav/clamav-milter.conf; \
sed -i "s|#\?ClamdSocket unix:/run/clamav/clamd.sock|ClamdSocket unix:/tmp/clamd.sock|g" /etc/clamav/clamav-milter.conf; \
sed -i "s|#\?OnInfected Quarantine|OnInfected Reject|g" /etc/clamav/clamav-milter.conf; \
sed -i "s|#\?AddHeader Replace|AddHeader Add|g" /etc/clamav/clamav-milter.conf; \
sed -i "s|#\?Foreground yes|Foreground yes|g" /etc/clamav/clamav-milter.conf
COPY --chmod=775 start.sh /start.sh
COPY --chmod=775 healthcheck.sh /healthcheck.sh
COPY --chmod=664 supervisord.conf /supervisord.conf
USER 100
RUN set -ex; \
freshclam --foreground --stdout
VOLUME /var/lib/clamav
ENTRYPOINT ["/start.sh"]
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
HEALTHCHECK --start-period=60s --retries=9 CMD /healthcheck.sh
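The StreamMaxLength tweak above is noted as needing to match av_stream_max_length in the files_antivirus app; a hedged sketch of aligning the Nextcloud side (interpreting 2000M as 2000*1024*1024 bytes and using the usual AIO occ invocation are both assumptions):
sudo docker exec --user www-data nextcloud-aio-nextcloud php occ \
    config:app:set files_antivirus av_stream_max_length --value="2097152000"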

View file

@ -0,0 +1,4 @@
MaxDirectoryRecursion 30
MaxFileSize 100M
PCREMaxFileSize 100M
StreamMaxLength 100M

View file

@ -1,9 +0,0 @@
#!/bin/bash
if [ "$(echo "PING" | nc 127.0.0.1 3310)" != "PONG" ]; then
echo "ERROR: Unable to contact server"
exit 1
fi
echo "Clamd is up"
exit 0

View file

@ -1,8 +0,0 @@
#!/bin/bash
# Print out clamav version for compliance reasons
clamscan --version
echo "Clamav started"
exec "$@"

View file

@ -1,30 +0,0 @@
[supervisord]
nodaemon=true
nodaemon=true
logfile=/var/log/supervisord/supervisord.log
pidfile=/var/run/supervisord/supervisord.pid
childlogdir=/var/log/supervisord/
logfile_maxbytes=50MB
logfile_backups=10
loglevel=error
[program:freshclam]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=freshclam --foreground --stdout --daemon --daemon-notify=/etc/clamav/clamd.conf
[program:clamd]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=clamd --foreground --config-file=/etc/clamav/clamd.conf
[program:milter]
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=clamav-milter --config-file=/etc/clamav/clamav-milter.conf

View file

@ -1,16 +0,0 @@
# syntax=docker/dockerfile:latest
# From https://gitlab.collabora.com/collabora-online/docker
# hadolint ignore=DL3007
FROM registry.gitlab.collabora.com/collabora-online/docker:latest
USER root
ARG DEBIAN_FRONTEND=noninteractive
COPY --chmod=775 healthcheck.sh /healthcheck.sh
USER 1001
HEALTHCHECK --start-period=60s --retries=9 CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"

View file

@ -1,7 +0,0 @@
#!/bin/bash
# Unfortunately, neither curl nor nc is installed in the container
# and packages cannot be added either, as the package list is broken.
# So we always exit 0 for now.
# nc http://127.0.0.1:9980 || exit 1
exit 0

View file

@ -1,15 +1,17 @@
# syntax=docker/dockerfile:latest # From a file located probably somewhere here: https://github.com/CollaboraOnline/online/tree/master/docker
# From a file located probably somewhere here: https://github.com/CollaboraOnline/online/blob/master/docker/from-packages/Dockerfile FROM collabora/code:22.05.6.1.1
FROM collabora/code:25.04.8.2.1
USER root USER root
ARG DEBIAN_FRONTEND=noninteractive
COPY --chmod=775 healthcheck.sh /healthcheck.sh RUN set -ex; \
\
apt-get update; \
export DEBIAN_FRONTEND=noninteractive; \
apt-get install -y --no-install-recommends \
tzdata \
; \
rm -rf /var/lib/apt/lists/*
USER 1001 USER 104
HEALTHCHECK --start-period=60s --retries=9 CMD /healthcheck.sh HEALTHCHECK CMD curl -skfI localhost:9980 || exit 1
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"

View file

@ -1,7 +0,0 @@
#!/bin/bash
# Unfortunately, neither curl nor nc is installed in the container
# and packages cannot be added either, as the package list is broken.
# So we always exit 0 for now.
# nc http://127.0.0.1:9980 || exit 1
exit 0

View file

@ -1,23 +0,0 @@
# syntax=docker/dockerfile:latest
FROM haproxy:3.3.2-alpine
# hadolint ignore=DL3002
USER root
ENV NEXTCLOUD_HOST=nextcloud-aio-nextcloud
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache \
ca-certificates \
tzdata \
bash \
bind-tools; \
chmod -R 777 /tmp
COPY --chmod=775 *.sh /
COPY --chmod=664 haproxy.cfg /haproxy.cfg
ENTRYPOINT ["/start.sh"]
HEALTHCHECK CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"

View file

@ -1,68 +0,0 @@
# Inspiration: https://github.com/Tecnativa/docker-socket-proxy/blob/master/haproxy.cfg
global
maxconn 10
defaults
timeout connect 30s
timeout client 30s
timeout server 1800s
frontend http
mode http
bind :::2375 v4v6
http-request deny unless { src 127.0.0.1 } || { src ::1 } || { src NC_IPV4_PLACEHOLDER } || { src NC_IPV6_PLACEHOLDER }
# docker system _ping
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/_ping$ } METH_GET
# docker inspect image: GET images/%s/json
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/images/.*/json } METH_GET
# container inspect: GET containers/%s/json
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+/json } METH_GET
# container inspect: GET containers/%s/logs
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+/logs } METH_GET
# container start/stop: POST containers/%s/start containers/%s/stop
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+/((start)|(stop)) } METH_POST
# container rm: DELETE containers/%s
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+ } METH_DELETE
# container update/exec: POST containers/%s/update containers/%s/exec
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+/((update)|(exec)) } METH_POST
# container put: PUT containers/%s/archive
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/nc_app_[a-zA-Z0-9_.-]+/archive } METH_PUT
# run exec instance: POST exec/%s
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/exec/[a-zA-Z0-9_.-]+/start } METH_POST
# container create: POST containers/create?name=%s
# ACL to restrict container name to nc_app_[a-zA-Z0-9_.-]+
acl nc_app_container_name url_param(name) -m reg -i "^nc_app_[a-zA-Z0-9_.-]+"
# ACL to restrict the number of Mounts to 1
acl one_mount_volume req.body -m reg -i "\"Mounts\"\s*:\s*\[\s*(?:(?!\"Mounts\"\s*:\s*\[)[^}]*)}[^}]*\]"
# ACL to deny if there are any binds
acl binds_present req.body -m reg -i "\"HostConfig\"\s*:.*\"Binds\"\s*:"
# ACL to restrict the type of Mounts to volume
acl type_not_volume req.body -m reg -i "\"Mounts\"\s*:\s*\[[^\]]*(\"Type\"\s*:\s*\"(?!volume\b)\w+\"[^\]]*)+\]"
http-request deny if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/create } nc_app_container_name !one_mount_volume binds_present type_not_volume METH_POST
# ACL to ensure container creation does not set HostConfig.Privileged (by searching for the "Privileged" word in the whole payload)
acl no_privileged_flag req.body -m reg -i "\"Privileged\""
# ACL to allow mount volume with strict pattern for name: nc_app_[a-zA-Z0-9_.-]+_data
acl nc_app_volume_data_only req.body -m reg -i "\"Mounts\"\s*:\s*\[\s*{[^}]*\"Source\"\s*:\s*\"nc_app_[a-zA-Z0-9_.-]+_data\""
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/containers/create } nc_app_container_name !no_privileged_flag nc_app_volume_data_only METH_POST
# end of container create
# volume create: POST volumes/create
# restrict name
acl nc_app_volume_data req.body -m reg -i "\"Name\"\s*:\s*\"nc_app_[a-zA-Z0-9_.-]+_data\""
# do not allow the "device" option to be used, e.g. "--opt device=:/path/to/dir"
acl volume_no_device req.body -m reg -i "\"device\""
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/volumes/create } nc_app_volume_data !volume_no_device METH_POST
# volume rm: DELETE volumes/%s
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/volumes/nc_app_[a-zA-Z0-9_.-]+_data } METH_DELETE
# image pull: POST images/create?fromImage=%s
http-request allow if { path,url_dec -m reg -i ^(/v[\d\.]+)?/images/create } METH_POST
http-request deny
default_backend dockerbackend
backend dockerbackend
mode http
server dockersocket /var/run/docker.sock
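For orientation: the ruleset above exposes the Docker socket on port 2375 but only whitelists a handful of API calls, and only for loopback and the Nextcloud container's addresses. A minimal sketch of probing it from the Nextcloud container; the proxy hostname "nextcloud-aio-docker-socket-proxy" and the availability of curl there are assumptions, not taken from this file:
# run from inside the Nextcloud container
curl -s http://nextcloud-aio-docker-socket-proxy:2375/_ping                     # whitelisted: prints "OK"
curl -s -o /dev/null -w '%{http_code}\n' -X POST \
  "http://nextcloud-aio-docker-socket-proxy:2375/containers/create?name=evil"   # not whitelisted: 403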

View file

@ -1,4 +0,0 @@
#!/bin/bash
nc -z "$NEXTCLOUD_HOST" 9001 || exit 0
nc -z 127.0.0.1 2375 || exit 1

View file

@ -1,23 +0,0 @@
#!/bin/sh
# Only start the container if Nextcloud is accessible
while ! nc -z "$NEXTCLOUD_HOST" 9001; do
echo "Waiting for Nextcloud to start..."
sleep 5
done
set -x
IPv4_ADDRESS_NC="$(dig nextcloud-aio-nextcloud IN A +short +search | grep '^[0-9.]\+$' | sort | head -n1)"
HAPROXYFILE="$(sed "s|NC_IPV4_PLACEHOLDER|$IPv4_ADDRESS_NC|" /haproxy.cfg)"
echo "$HAPROXYFILE" > /tmp/haproxy.cfg
IPv6_ADDRESS_NC="$(dig nextcloud-aio-nextcloud AAAA +short +search | grep '^[0-9a-f:]\+$' | sort | head -n1)"
if [ -n "$IPv6_ADDRESS_NC" ]; then
HAPROXYFILE="$(sed "s|NC_IPV6_PLACEHOLDER|$IPv6_ADDRESS_NC|" /tmp/haproxy.cfg)"
else
HAPROXYFILE="$(sed "s# || { src NC_IPV6_PLACEHOLDER }##g" /tmp/haproxy.cfg)"
fi
echo "$HAPROXYFILE" > /tmp/haproxy.cfg
set +x
haproxy -f /tmp/haproxy.cfg -db
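Once the loop above has finished, the placeholders should be gone from the generated configuration. A hedged sketch of verifying that from the host; the container name "nextcloud-aio-docker-socket-proxy" is an assumption, not taken from this script:
docker exec nextcloud-aio-docker-socket-proxy grep "http-request deny unless" /tmp/haproxy.cfg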

View file

@ -1,22 +1,18 @@
# syntax=docker/dockerfile:latest FROM alpine:3.16.2
FROM alpine:3.23.3 RUN apk add --update --no-cache lighttpd bash curl
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache bash lighttpd netcat-openbsd; \
adduser -S www-data -G www-data; \
rm -rf /etc/lighttpd/lighttpd.conf; \
chmod 777 -R /etc/lighttpd; \
mkdir -p /var/www/domaincheck; \
chown www-data:www-data -R /var/www; \
chmod 777 -R /var/www/domaincheck
COPY --chown=www-data:www-data lighttpd.conf /lighttpd.conf
COPY --chmod=775 start.sh /start.sh RUN adduser -S www-data -G www-data
RUN rm -rf /etc/lighttpd/lighttpd.conf
COPY lighttpd.conf /etc/lighttpd/lighttpd.conf
RUN chmod +r -R /etc/lighttpd && \
chown www-data:www-data -R /var/www && \
chown www-data:www-data /etc/lighttpd/lighttpd.conf
COPY start.sh /
RUN chmod +x /start.sh
USER www-data USER www-data
RUN mkdir -p /var/www/domaincheck/
ENTRYPOINT ["/start.sh"] ENTRYPOINT ["/start.sh"]
HEALTHCHECK CMD nc -z 127.0.0.1 $APACHE_PORT || exit 1 HEALTHCHECK CMD curl -skfI localhost:$APACHE_PORT || exit 1
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"

View file

@ -11,7 +11,7 @@ if [ -z "$APACHE_PORT" ]; then
export APACHE_PORT="443" export APACHE_PORT="443"
fi fi
CONF_FILE="$(sed "s|ipv6-placeholder|\[::\]:$APACHE_PORT|" /lighttpd.conf)" CONF_FILE="$(sed "s|ipv6-placeholder|\[::\]:$APACHE_PORT|" /etc/lighttpd/lighttpd.conf)"
echo "$CONF_FILE" > /etc/lighttpd/lighttpd.conf echo "$CONF_FILE" > /etc/lighttpd/lighttpd.conf
# Check config file # Check config file

View file

@ -1,27 +1,6 @@
# syntax=docker/dockerfile:latest
# Probably from here https://github.com/elastic/elasticsearch/blob/main/distribution/docker/src/docker/Dockerfile # Probably from here https://github.com/elastic/elasticsearch/blob/main/distribution/docker/src/docker/Dockerfile
FROM elasticsearch:8.19.10 FROM elasticsearch:7.17.6
USER root RUN elasticsearch-plugin install --batch ingest-attachment
ARG DEBIAN_FRONTEND=noninteractive HEALTHCHECK CMD curl -skfI localhost:9200 || exit 1
# hadolint ignore=DL3008
RUN set -ex; \
\
apt-get update; \
apt-get upgrade -y; \
apt-get install -y --no-install-recommends \
tzdata \
; \
rm -rf /var/lib/apt/lists/*;
COPY --chmod=775 healthcheck.sh /healthcheck.sh
USER 1000:0
HEALTHCHECK --interval=10s --timeout=5s --start-period=1m --retries=5 CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"
ENV ES_JAVA_OPTS="-Xms512M -Xmx512M"

View file

@ -1,3 +0,0 @@
#!/bin/bash
nc -z 127.0.0.1 9200 || exit 1

View file

@ -1,47 +1,15 @@
# syntax=docker/dockerfile:latest # From https://github.com/h2non/imaginary/blob/master/Dockerfile
FROM golang:1.25.6-alpine3.23 AS go FROM nextcloud/imaginary:20220919
ENV IMAGINARY_HASH=6a274b488759a896aff02f52afee6e50b5e3a3ee
USER root
RUN set -ex; \ RUN set -ex; \
apk upgrade --no-cache -a; \ \
apk add --no-cache \ apt-get update; \
vips-dev \ apt-get install -y --no-install-recommends \
vips-magick \
vips-heif \
vips-jxl \
vips-poppler \
build-base; \
go install github.com/h2non/imaginary@"$IMAGINARY_HASH";
FROM alpine:3.23.3
RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache \
tzdata \
ca-certificates \ ca-certificates \
netcat-openbsd \ curl \
vips \ ; \
vips-magick \ rm -rf /var/lib/apt/lists/*
vips-heif \ USER nobody
vips-jxl \
vips-poppler \
ttf-dejavu \
bash
COPY --from=go /go/bin/imaginary /usr/local/bin/imaginary HEALTHCHECK CMD curl -skI 127.0.0.1:9000 || exit 1
COPY --chmod=775 start.sh /start.sh
COPY --chmod=775 healthcheck.sh /healthcheck.sh
ENV PORT=9000
USER 65534
# https://github.com/h2non/imaginary#memory-issues
ENV MALLOC_ARENA_MAX=2
ENTRYPOINT ["/start.sh"]
HEALTHCHECK CMD /healthcheck.sh
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"

View file

@ -1,3 +0,0 @@
#!/bin/bash
nc -z 127.0.0.1 "$PORT" || exit 1

View file

@ -1,8 +0,0 @@
#!/bin/bash
echo "Imaginary has started"
if [ -z "$IMAGINARY_SECRET" ]; then
imaginary -return-size -max-allowed-resolution 222.2 "$@"
else
imaginary -return-size -max-allowed-resolution 222.2 -key "$IMAGINARY_SECRET" "$@"
fi
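The flags above come straight from the script (-return-size, -max-allowed-resolution and, when a secret is set, -key). What a request against the started service looks like is a hedged sketch based on the upstream h2non/imaginary documentation, so verify it against the pinned version; the key parameter is only needed when IMAGINARY_SECRET is set:
# resize a local image via the HTTP API listening on $PORT (9000)
curl -s -o thumbnail.jpg -F "file=@photo.jpg" \
  "http://127.0.0.1:9000/resize?width=256&key=$IMAGINARY_SECRET"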

8
Containers/mastercontainer/.idea/.gitignore generated vendored Normal file
View file

@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
# Editor-based HTTP Client requests
/httpRequests/

View file

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
<component name="NewModuleRootManager" inherit-compiler-output="true">
<exclude-output />
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

View file

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager">
<output url="file://$PROJECT_DIR$/out" />
</component>
</project>

View file

@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/mastercontainer.iml" filepath="$PROJECT_DIR$/.idea/mastercontainer.iml" />
</modules>
</component>
</project>

View file

@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="$PROJECT_DIR$/../.." vcs="Git" />
</component>
</project>

View file

@ -6,32 +6,17 @@
storage file_system { storage file_system {
root /mnt/docker-aio-config/caddy/ root /mnt/docker-aio-config/caddy/
} }
log {
level ERROR
}
servers {
protocols h1 h2 h2c
}
on_demand_tls {
ask http://127.0.0.1:9876/
}
} }
http://:80 { http://:80 {
redir https://{host}{uri} permanent redir https://{host}{uri}
} }
https://:8443 { https://:8443 {
reverse_proxy 127.0.0.1:8000 reverse_proxy localhost:8000
tls { tls {
on_demand on_demand
issuer acme {
disable_tlsalpn_challenge
}
} }
} }
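For context on the main-branch side of this Caddyfile: with on_demand_tls, Caddy consults the configured ask endpoint before obtaining a certificate by sending a GET request carrying the requested hostname, and a 2xx response permits issuance. A quick sketch of simulating that check (the domain is a placeholder):
curl -si "http://127.0.0.1:9876/?domain=your-domain-that-points-to-this-server.tld" | head -n 1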

View file

@ -1,138 +1,107 @@
# syntax=docker/dockerfile:latest
# Docker CLI is a requirement # Docker CLI is a requirement
FROM docker:29.2.0-cli AS docker FROM docker:20.10.18-dind-alpine3.16 as dind
# Caddy is a requirement # Caddy is a requirement
FROM caddy:2.10.2-alpine AS caddy FROM caddy:2.5.2-alpine as caddy
# From https://github.com/docker-library/php/blob/master/8.4/alpine3.23/fpm/Dockerfile # From https://github.com/docker-library/php/blob/master/8.0/bullseye/apache/Dockerfile
FROM php:8.4.17-fpm-alpine3.23 FROM php:8.0.23-apache-bullseye
EXPOSE 80 EXPOSE 80
EXPOSE 8080 EXPOSE 8080
EXPOSE 8443 EXPOSE 8443
# Overwrite home variable for subservices RUN mkdir -p /mnt/docker-aio-config/;
ENV HOME=/var/www
COPY --from=caddy /usr/bin/caddy /usr/bin/caddy VOLUME /mnt/docker-aio-config/
COPY --from=docker /usr/local/bin/docker /usr/local/bin/docker
COPY community-containers /var/www/docker-aio/community-containers RUN mkdir -p /var/www/docker-aio;
COPY php /var/www/docker-aio/php
COPY --chmod=775 Containers/mastercontainer/*.sh /
COPY --chmod=664 Containers/mastercontainer/Caddyfile /Caddyfile
COPY --chmod=664 Containers/mastercontainer/supervisord.conf /supervisord.conf
COPY Containers/mastercontainer/mastercontainer.conf /etc/apache2/sites-available/mastercontainer.conf
WORKDIR /var/www/docker-aio WORKDIR /var/www/docker-aio
# hadolint ignore=SC2086,DL3047,DL3003,DL3004 RUN apt-get update; \
RUN set -ex; \ apt-get install -y --no-install-recommends \
apk upgrade --no-cache -a; \ git \
apk add --no-cache shadow; \
groupmod -g 33 www-data; \
usermod -u 33 -g 33 www-data; \
\
apk add --no-cache \
util-linux-misc \
ca-certificates \
wget \
bash \
apache2 \
apache2-proxy \
apache2-ssl \
supervisor \ supervisor \
openssl \ openssl \
sudo \ sudo \
netcat-openbsd \ dpkg-dev \
curl \ netcat \
grep; \ ; \
\ rm -rf /var/lib/apt/lists/*
apk add --no-cache --virtual .build-deps \
autoconf \ COPY --from=caddy /usr/bin/caddy /usr/bin/
build-base; \ RUN chmod +x /usr/bin/caddy
pecl install APCu-5.1.28; \
docker-php-ext-enable apcu; \ COPY --from=dind /usr/local/bin/docker /usr/local/bin/
rm -r /tmp/pear; \ RUN chmod +x /usr/local/bin/docker
runDeps="$( \
scanelf --needed --nobanner --format '%n#p' --recursive /usr/local/lib/php/extensions \ RUN set -ex; \
| tr ',' '\n' \ pecl install APCu-5.1.22; \
| sort -u \ docker-php-ext-enable apcu
| awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
)"; \ RUN set -e && \
apk add --no-cache --virtual .nextcloud-aio-rundeps $runDeps; \ curl -sS https://getcomposer.org/installer | php && \
apk del .build-deps; \ mv composer.phar /usr/local/bin/composer && \
grep -q '^pm = dynamic' /usr/local/etc/php-fpm.d/www.conf; \ chmod +x /usr/local/bin/composer && \
sed -i 's/^pm = dynamic/pm = ondemand/' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's/^pm.max_children =.*/pm.max_children = 80/' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's|access.log = /proc/self/fd/2|access.log = /proc/self/fd/1|' /usr/local/etc/php-fpm.d/docker.conf; \
grep -q ';listen.allowed_clients' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's|;listen.allowed_clients.*|listen.allowed_clients = 127.0.0.1,::1|' /usr/local/etc/php-fpm.d/www.conf; \
\
apk add --no-cache git; \
wget https://getcomposer.org/installer -O - | php -- --install-dir=/usr/local/bin --filename=composer; \
chmod +x /usr/local/bin/composer; \
cd /var/www/docker-aio; \ cd /var/www/docker-aio; \
rm -r ./php/tests; \ git clone https://github.com/nextcloud-releases/all-in-one.git --depth 1 .; \
chown www-data:www-data -R /var/www/docker-aio; \
cd php; \ cd php; \
sudo -E -u www-data composer install --no-dev; \ composer install --no-dev; \
sudo -E -u www-data composer clear-cache; \ composer clearcache; \
cd ..; \ cd ..; \
rm -f /usr/local/bin/composer; \ rm -f /usr/local/bin/composer; \
chmod -R 770 /var/www/docker-aio; \ chmod 770 -R ./; \
chown -R www-data:www-data /var/www; \ chown www-data:www-data -R ./; \
rm -r php/data; \ rm -r ./php/data; \
rm -r php/session; \ rm -r ./php/session
\
mkdir -p /etc/apache2/certs; \ RUN mkdir -p /etc/apache2/certs && \
cd /etc/apache2/certs; \ cd /etc/apache2/certs && \
openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 -subj "/C=DE/ST=BE/L=Local/O=Dev/CN=nextcloud.local" -keyout /etc/apache2/certs/ssl.key -out /etc/apache2/certs/ssl.crt; \ openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 -subj "/C=DE/ST=BE/L=Local/O=Dev/CN=nextcloud.local" -keyout ./ssl.key -out ./ssl.crt;
\
sed -i \ COPY mastercontainer.conf /etc/apache2/sites-available/
-e '/^Listen /d' \
-e 's/^LogLevel .*/LogLevel error/' \ RUN a2enmod rewrite \
-e 's|^ErrorLog .*|ErrorLog /proc/self/fd/2|' \ headers \
-e 's/User apache/User www-data/g' \ env \
-e 's/Group apache/Group www-data/g' \ mime \
-e 's/^#\(LoadModule .*mod_rewrite.so\)/\1/' \ dir \
-e 's/^#\(LoadModule .*mod_headers.so\)/\1/' \ authz_core \
-e 's/^#\(LoadModule .*mod_env.so\)/\1/' \ proxy \
-e 's/^#\(LoadModule .*mod_mime.so\)/\1/' \ proxy_http \
-e 's/^#\(LoadModule .*mod_dir.so\)/\1/' \ ssl
-e 's/^#\(LoadModule .*mod_authz_core.so\)/\1/' \
-e 's/^#\(LoadModule .*mod_mpm_event.so\)/\1/' \ RUN rm /etc/apache2/ports.conf; \
-e 's/\(LoadModule .*mod_mpm_worker.so\)/#\1/' \ sed -s -i -e "s/Include ports.conf//" /etc/apache2/apache2.conf; \
-e 's/\(LoadModule .*mod_mpm_prefork.so\)/#\1/' \ sed -i "/^Listen /d" /etc/apache2/apache2.conf
-e 's/\(ScriptAlias \)/#\1/' \
/etc/apache2/httpd.conf; \ RUN a2dissite 000-default && \
mkdir -p /etc/apache2/logs; \ a2dissite default-ssl && \
rm /etc/apache2/conf.d/ssl.conf; \ a2ensite mastercontainer.conf
echo "ServerName localhost" | tee -a /etc/apache2/httpd.conf; \
grep -q '^LoadModule lbmethod_heartbeat_module' /etc/apache2/conf.d/proxy.conf; \ RUN mkdir /var/log/supervisord; \
sed -i 's|^LoadModule lbmethod_heartbeat_module.*|#LoadModule lbmethod_heartbeat_module|' /etc/apache2/conf.d/proxy.conf; \
echo "SSLSessionCache nonenotnull" | tee -a /etc/apache2/httpd.conf; \
echo "LoadModule ssl_module modules/mod_ssl.so" | tee -a /etc/apache2/httpd.conf; \
echo "LoadModule socache_shmcb_module modules/mod_socache_shmcb.so" | tee -a /etc/apache2/httpd.conf; \
echo "Include /etc/apache2/sites-available/mastercontainer.conf" | tee -a /etc/apache2/httpd.conf; \
\
rm -f /etc/apache2/conf.d/default.conf \
/etc/apache2/conf.d/userdir.conf \
/etc/apache2/conf.d/info.conf; \
\
rm -rf /var/www/localhost/cgi-bin/; \
mkdir /var/log/supervisord; \
mkdir /var/run/supervisord; mkdir /var/run/supervisord;
# hadolint ignore=DL3048 COPY Caddyfile /
LABEL org.label-schema.vendor="Nextcloud" \ COPY start.sh /usr/bin/
wud.watch="false" \ COPY backup-time-file-watcher.sh /
com.docker.compose.project="nextcloud-aio" COPY session-deduplicator.sh /
COPY cron.sh /
COPY daily-backup.sh /
COPY supervisord.conf /
COPY healthcheck.sh /
RUN chmod +x /usr/bin/start.sh; \
chmod +x /cron.sh; \
chmod +x /session-deduplicator.sh; \
chmod +x /backup-time-file-watcher.sh; \
chmod +x /daily-backup.sh; \
chmod a+r /Caddyfile; \
chmod +x /healthcheck.sh
# hadolint ignore=DL3002
USER root USER root
ENTRYPOINT ["/start.sh"] ENTRYPOINT ["start.sh"]
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]
HEALTHCHECK CMD /healthcheck.sh HEALTHCHECK CMD /healthcheck.sh
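Because the main-branch Dockerfile above copies php/, community-containers/ and Containers/mastercontainer/ relative to the repository root, a local build has to use the repository root as its build context. A hedged sketch (the tag is arbitrary):
docker build -t nextcloud-aio-mastercontainer:local -f Containers/mastercontainer/Dockerfile .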

View file

@ -1,69 +0,0 @@
# Nextcloud All-in-One `mastercontainer`
This folder contains the OCI/Docker container definition, along with associated resources and
configuration files, for building the `mastercontainer` as part of the Nextcloud All-in-One
project. This container hosts [the Nextcloud AIO interface](
https://github.com/nextcloud/all-in-one/tree/main/php)[^app], and a dedicated PHP environment
for it (which is completely independent of the Nextcloud Server).
## Overview
The mastercontainer acts as the central orchestration service for the deployment and management
of all other containers in the Nextcloud All-in-One stack. It hosts:
- A dedicated PHP SAPI/backend (php-fpm) for AIO itself (not Nextcloud Server)
- An Apache service for accessing the AIO interface via a self-signed HTTPS VirtualHost on 8080/tcp
- A Caddy reverse proxy service enabling HTTPS access to the AIO frontend on port 8443/tcp.
- Caddy will automatically obtain a Let's Encrypt certificate if ports 80 and 8443
are open/forwarded and a domain points to this server; then simply open the Nextcloud AIO interface using that
domain (`https://your-domain-that-points-to-this-server.tld:8443`). The certificate request uses an
[ACME HTTP-01](https://letsencrypt.org/docs/challenge-types/#http-01-challenge) challenge
(see the reachability sketch after this list).
- Miscellaneous support services specific to AIO (backup management, health checks, etc.)
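Before relying on the automatic certificate, it can help to confirm from outside the network that the two ports mentioned above are actually reachable. A minimal sketch, assuming the OpenBSD netcat used elsewhere in this repository and a placeholder hostname:
nc -z -w 5 your-domain-that-points-to-this-server.tld 80 && echo "port 80 reachable"
nc -z -w 5 your-domain-that-points-to-this-server.tld 8443 && echo "port 8443 reachable"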
## Key Responsibilities
- Orchestrates the deployment and lifecycle of all Nextcloud service containers
- Handles initial setup and container configuration
- Coordinates image updates
- Monitors general system health
It triggers the initial installation and ensures the smooth operation of the Nextcloud
All-in-One stack.
## Contents
- **Dockerfile**: Instructions for building the mastercontainer image.
- **Entrypoint script**: The `start.sh` script is used for container initialization and runtime
configuration before starting supervisord.
- [**Nextcloud All-in-One Controller App**](https://github.com/nextcloud/all-in-one/tree/main/php): The
core AIO orchestrator that handles configuration and settings for the containers.
- **Supervisor**: The `supervisord.conf` file defines the long-running services hosted within
the container (php-fpm, cron, etc.)
## Usage
This container should be used as the trigger image when deploying the Nextcloud All-in-One
stack in a Docker or other OCI-compliant container environment. For detailed deployment
instructions, refer to the [project documentation](
https://github.com/nextcloud/all-in-one).
## Related Resources
- [Main Repository](https://github.com/nextcloud/all-in-one)
- [Documentation](https://github.com/nextcloud/all-in-one#readme)
## Contributing
Contributions are welcome! Please follow the Nextcloud project's guidelines and submit pull
requests or issues via the main repository.
## License
This folder and its contents are licensed under the
[GNU AGPLv3](https://www.gnu.org/licenses/agpl-3.0.html), in line with the rest of Nextcloud
All-in-One.
[^app]: The Nextcloud All-in-One interface allows users to install, configure, and
manage their Nextcloud instance and related containers via a secure web interface and API.
It automates and simplifies complex tasks such as container orchestration, backups, updates,
and service management for users deploying Nextcloud in Docker environments.

View file

@ -12,20 +12,15 @@ while true; do
export AUTOMATIC_UPDATES=0 export AUTOMATIC_UPDATES=0
export START_CONTAINERS=1 export START_CONTAINERS=1
fi fi
if [ "$(sed -n '3p' "/mnt/docker-aio-config/data/daily_backup_time")" != 'successNotificationsAreNotEnabled' ]; then
export SEND_SUCCESS_NOTIFICATIONS=1
else
export SEND_SUCCESS_NOTIFICATIONS=0
fi
set +x set +x
if [ -f "/mnt/docker-aio-config/data/daily_backup_running" ]; then
export LOCK_FILE_PRESENT=1
else
export LOCK_FILE_PRESENT=0
fi
else else
export BACKUP_TIME="04:00" export BACKUP_TIME="04:00"
export DAILY_BACKUP=0 export DAILY_BACKUP=0
fi
if [ -f "/mnt/docker-aio-config/data/daily_backup_running" ]; then
export LOCK_FILE_PRESENT=1
else
export LOCK_FILE_PRESENT=0 export LOCK_FILE_PRESENT=0
fi fi
@ -43,32 +38,14 @@ while true; do
# Make sure to delete the lock file always # Make sure to delete the lock file always
rm -f "/mnt/docker-aio-config/data/daily_backup_running" rm -f "/mnt/docker-aio-config/data/daily_backup_running"
# Check for updates and send notification if yes on saturdays # Check for updates and send notification if yes
if [ "$(date +%u)" = 6 ]; then sudo -u www-data php /var/www/docker-aio/php/src/Cron/UpdateNotification.php
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/UpdateNotification.php
fi
# Check if AIO is outdated
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/OutdatedNotification.php
# Remove sessions older than 24h # Remove sessions older than 24h
find "/mnt/docker-aio-config/session/" -mindepth 1 -mmin +1440 -delete find "/mnt/docker-aio-config/session/" -mindepth 1 -mmin +1440 -delete
# Remove nextcloud-aio-domaincheck container
if sudo -E -u www-data docker ps --format "{{.Names}}" --filter "status=exited" | grep -q "^nextcloud-aio-domaincheck$"; then
sudo -E -u www-data docker container remove nextcloud-aio-domaincheck
fi
# Remove dangling images # Remove dangling images
sudo -E -u www-data docker image prune --filter "label=org.label-schema.vendor=Nextcloud" --force sudo -u www-data docker image prune -f
# Check for available free space
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/CheckFreeDiskSpace.php
# Remove mastercontainer from default bridge network
if sudo -E -u www-data docker inspect nextcloud-aio-mastercontainer --format "{{.NetworkSettings.Networks}}" | grep -q "bridge"; then
sudo -E -u www-data docker network disconnect bridge nextcloud-aio-mastercontainer
fi
# Wait 60s so that the whole loop will not be executed again # Wait 60s so that the whole loop will not be executed again
sleep 60 sleep 60

View file

@ -2,13 +2,6 @@
echo "Daily backup script has started" echo "Daily backup script has started"
# Check if initial configuration has been done, otherwise this script should do nothing.
CONFIG_FILE=/mnt/docker-aio-config/data/configuration.json
if ! [ -f "$CONFIG_FILE" ] || (! grep -q "wasStartButtonClicked.*1" "$CONFIG_FILE" && ! grep -q "wasStartButtonClicked.*true" "$CONFIG_FILE"); then
echo "Initial configuration via AIO interface not done yet. Exiting..."
exit 0
fi
# Daily backup and backup check cannot be run at the same time # Daily backup and backup check cannot be run at the same time
if [ "$DAILY_BACKUP" = 1 ] && [ "$CHECK_BACKUP" = 1 ]; then if [ "$DAILY_BACKUP" = 1 ] && [ "$CHECK_BACKUP" = 1 ]; then
echo "Daily backup and backup check cannot be run at the same time. Exiting..." echo "Daily backup and backup check cannot be run at the same time. Exiting..."
@ -18,25 +11,16 @@ fi
# Delete all active sessions and create a lock file # Delete all active sessions and create a lock file
# But don't kick out the user if the mastercontainer was just updated since we block the interface either way with the lock file # But don't kick out the user if the mastercontainer was just updated since we block the interface either way with the lock file
if [ "$LOCK_FILE_PRESENT" = 0 ] || ! [ -f "/mnt/docker-aio-config/data/daily_backup_running" ]; then if [ "$LOCK_FILE_PRESENT" = 0 ] || ! [ -f "/mnt/docker-aio-config/data/daily_backup_running" ]; then
find "/mnt/docker-aio-config/session/" -mindepth 1 -delete rm -f "/mnt/docker-aio-config/session/"*
fi fi
sudo -E -u www-data touch "/mnt/docker-aio-config/data/daily_backup_running" sudo -u www-data touch "/mnt/docker-aio-config/data/daily_backup_running"
# Check if apache is running/stopped, watchtower is stopped and backupcontainer is stopped # Check if apache is running/stopped, watchtower is stopped and backupcontainer is stopped
LOCAL_APACHE_PORT="$(docker inspect nextcloud-aio-apache --format "{{.Config.Env}}" | grep -o 'APACHE_PORT=[0-9]\+' | grep -o '[0-9]\+' | head -1)" APACHE_PORT="$(docker inspect nextcloud-aio-apache --format "{{.HostConfig.PortBindings}}" | grep -oP '[0-9]+' | head -1)"
if [ -z "$LOCAL_APACHE_PORT" ]; then while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-apache$" && ! nc -z nextcloud-aio-apache "$APACHE_PORT"; do
echo "APACHE_PORT is not set which is not expected..." echo "Waiting for apache to become available"
else sleep 30
# Connect mastercontainer to nextcloud-aio network to make sure that nextcloud-aio-apache is reachable done
# Prevent issues like https://github.com/nextcloud/all-in-one/discussions/5222
docker network connect nextcloud-aio nextcloud-aio-mastercontainer &>/dev/null
# Wait for apache to start
while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-apache$" && ! nc -z nextcloud-aio-apache "$LOCAL_APACHE_PORT"; do
echo "Waiting for apache to become available"
sleep 30
done
fi
while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-watchtower$"; do while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-watchtower$"; do
echo "Waiting for watchtower to stop" echo "Waiting for watchtower to stop"
sleep 30 sleep 30
@ -50,60 +34,46 @@ done
if [ "$AUTOMATIC_UPDATES" = 1 ]; then if [ "$AUTOMATIC_UPDATES" = 1 ]; then
echo "Starting mastercontainer update..." echo "Starting mastercontainer update..."
echo "(The script might get exited due to that. In order to update all the other containers correctly, you need to run this script with the same settings a second time.)" echo "(The script might get exited due to that. In order to update all the other containers correctly, you need to run this script with the same settings a second time.)"
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/UpdateMastercontainer.php sudo -u www-data php /var/www/docker-aio/php/src/Cron/UpdateMastercontainer.php
fi fi
# Wait for watchtower to stop # Wait for watchtower to stop
if [ "$AUTOMATIC_UPDATES" = 1 ]; then if [ "$AUTOMATIC_UPDATES" = 1 ] && ! docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-watchtower$"; then
if ! docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-watchtower$"; then echo "Something seems to be wrong: Watchtower should be started at this step."
echo "Something seems to be wrong: Watchtower should be started at this step." else
fi
while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-watchtower$"; do while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-watchtower$"; do
echo "Waiting for watchtower to stop" echo "Waiting for watchtower to stop"
sleep 30 sleep 30
done done
fi fi
# Update container images to reduce downtime later on
if [ "$AUTOMATIC_UPDATES" = 1 ]; then
echo "Updating container images..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/PullContainerImages.php
fi
# Stop containers if required # Stop containers if required
# shellcheck disable=SC2235 # shellcheck disable=SC2235
if [ "$CHECK_BACKUP" != 1 ] && ([ "$DAILY_BACKUP" != 1 ] || [ "$STOP_CONTAINERS" = 1 ]); then if [ "$CHECK_BACKUP" != 1 ] && ([ "$DAILY_BACKUP" != 1 ] || [ "$STOP_CONTAINERS" = 1 ]); then
echo "Stopping containers..." echo "Stopping containers..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/StopContainers.php sudo -u www-data php /var/www/docker-aio/php/src/Cron/StopContainers.php
fi fi
# Execute the backup itself and some related tasks (also stops the containers) # Execute the backup itself and some related tasks (also stops the containers)
if [ "$DAILY_BACKUP" = 1 ]; then if [ "$DAILY_BACKUP" = 1 ]; then
echo "Creating daily backup..." echo "Creating daily backup..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/CreateBackup.php sudo -u www-data php /var/www/docker-aio/php/src/Cron/CreateBackup.php
if ! docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-borgbackup$"; then
echo "Something seems to be wrong: the borg container should be started at this step."
fi
while docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-borgbackup$"; do
echo "Waiting for backup container to stop"
sleep 30
done
fi fi
# Execute backup check # Execute backup check
if [ "$CHECK_BACKUP" = 1 ]; then if [ "$CHECK_BACKUP" = 1 ]; then
echo "Starting backup check..." echo "Starting backup check..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/CheckBackup.php sudo -u www-data php /var/www/docker-aio/php/src/Cron/CheckBackup.php
fi fi
# Start and/or update containers # Start and/or update containers
if [ "$AUTOMATIC_UPDATES" = 1 ]; then if [ "$AUTOMATIC_UPDATES" = 1 ]; then
echo "Starting and updating containers..." echo "Starting and updating containers..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/StartAndUpdateContainers.php sudo -u www-data php /var/www/docker-aio/php/src/Cron/StartAndUpdateContainers.php
else else
if [ "$START_CONTAINERS" = 1 ]; then if [ "$START_CONTAINERS" = 1 ]; then
echo "Starting containers without updating them..." echo "Starting containers without updating them..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/StartContainers.php sudo -u www-data php /var/www/docker-aio/php/src/Cron/StartContainers.php
fi fi
fi fi
@ -127,7 +97,7 @@ if [ "$DAILY_BACKUP" = 1 ] && ([ "$AUTOMATIC_UPDATES" = 1 ] || [ "$START_CONTAIN
done done
fi fi
echo "Sending backup notification..." echo "Sending backup notification..."
sudo -E -u www-data php /var/www/docker-aio/php/src/Cron/BackupNotification.php sudo -u www-data php /var/www/docker-aio/php/src/Cron/BackupNotification.php
fi fi
echo "Daily backup script has finished" echo "Daily backup script has finished"

View file

@ -1,10 +1,5 @@
#!/bin/bash #!/bin/bash
if [ -f "/mnt/docker-aio-config/data/configuration.json" ]; then if [ -f "/mnt/docker-aio-config/data/configuration.json" ]; then
nc -z 127.0.0.1 80 || exit 1 curl -skfI https://localhost:8080 || exit 1
nc -z 127.0.0.1 8000 || exit 1
nc -z 127.0.0.1 8080 || exit 1
nc -z 127.0.0.1 8443 || exit 1
nc -z 127.0.0.1 9000 || exit 1
nc -z 127.0.0.1 9876 || exit 1
fi fi

View file

@ -1,5 +1,8 @@
Listen 127.0.0.1:8000 Listen 8000
Listen 8080 https Listen 8080
CustomLog ${APACHE_LOG_DIR}/access.log combined
ErrorLog ${APACHE_LOG_DIR}/error.log
# Deny access to .ht files # Deny access to .ht files
<Files ".ht*"> <Files ".ht*">
@ -7,19 +10,10 @@ Listen 8080 https
</Files> </Files>
# Http host # Http host
<VirtualHost 127.0.0.1:8000> <VirtualHost *:8000>
ServerName 127.0.0.1
# Add error log
CustomLog /proc/self/fd/1 proxy
LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy
ErrorLog /proc/self/fd/2
ErrorLogFormat "[%t] [%l] [%E] [client: %{X-Forwarded-For}i] [%M] [%{User-Agent}i]"
LogLevel warn
# PHP match # PHP match
<FilesMatch "\.php$"> <FilesMatch "\.php$">
SetHandler "proxy:fcgi://127.0.0.1:9000" SetHandler application/x-httpd-php
</FilesMatch> </FilesMatch>
# Master dir # Master dir
DocumentRoot /var/www/docker-aio/php/public/ DocumentRoot /var/www/docker-aio/php/public/
@ -41,22 +35,16 @@ Listen 8080 https
# Https host # Https host
<VirtualHost *:8080> <VirtualHost *:8080>
# Proxy to https # Proxy to https
ProxyPass / http://127.0.0.1:8000/ ProxyPass / http://localhost:8000/
ProxyPassReverse / http://127.0.0.1:8000/ ProxyPassReverse / http://localhost:8000/
ProxyPreserveHost On ProxyPreserveHost On
# SSL # SSL
SSLCertificateKeyFile /etc/apache2/certs/ssl.key SSLCertificateKeyFile /etc/apache2/certs/ssl.key
SSLCertificateFile /etc/apache2/certs/ssl.crt SSLCertificateFile /etc/apache2/certs/ssl.crt
SSLEngine on SSLEngine on
SSLProtocol -all +TLSv1.2 +TLSv1.3 SSLProtocol -all +TLSv1.2 +TLSv1.3
SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305
SSLHonorCipherOrder off
SSLSessionTickets off
</VirtualHost> </VirtualHost>
# Increase timeout in case e.g. the initial download takes a long time # Increase timeout in case e.g. the initial download takes a long time
Timeout 7200 Timeout 7200
ProxyTimeout 7200 ProxyTimeout 7200
# See https://httpd.apache.org/docs/trunk/mod/core.html#traceenable
TraceEnable Off
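Both VirtualHosts can be exercised from inside the running mastercontainer to confirm the proxy chain; a hedged sketch, assuming curl is present in the image:
# plain-HTTP backend vhost on port 8000
docker exec nextcloud-aio-mastercontainer curl -sI http://127.0.0.1:8000/ | head -n 1
# self-signed HTTPS vhost on port 8080 that proxies to it
docker exec nextcloud-aio-mastercontainer curl -skI https://127.0.0.1:8080/ | head -n 1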

View file

@ -1,22 +1,26 @@
#!/bin/bash #!/bin/bash
deduplicate_sessions() {
echo "Deleting duplicate sessions"
find "/mnt/docker-aio-config/session/" -mindepth 1 -exec grep -qv "$NEW_SESSION_TIME" {} \; -delete
}
compare_times() {
if [ -f "/mnt/docker-aio-config/data/session_date_file" ]; then
unset NEW_SESSION_TIME
NEW_SESSION_TIME="$(cat "/mnt/docker-aio-config/data/session_date_file")"
if [ -n "$NEW_SESSION_TIME" ] && [ -n "$OLD_SESSION_TIME" ] && [ "$NEW_SESSION_TIME" != "$OLD_SESSION_TIME" ]; then
deduplicate_sessions
fi
OLD_SESSION_TIME="$NEW_SESSION_TIME"
fi
}
while true; do while true; do
compare_times while [ "$(find "/mnt/docker-aio-config/session/" -mindepth 1 -exec grep "aio_authenticated|[a-z]:1" {} \; | wc -l)" -gt 1 ]; do
sleep 2 # First delete all session files that are not authenticated
unset SESSION_FILES
SESSION_FILES="$(find "/mnt/docker-aio-config/session/" -mindepth 1)"
unset SESSION_FILES_ARRAY
mapfile -t SESSION_FILES_ARRAY <<< "$SESSION_FILES"
for SESSION_FILE in "${SESSION_FILES_ARRAY[@]}"; do
if [ -f "$SESSION_FILE" ] && ! grep -q "aio_authenticated|[a-z]:1" "$SESSION_FILE"; then
rm "$SESSION_FILE"
fi
done
# Second clean up all sessions that are authenticated
echo "Deleting duplicate sessions"
unset OLDEST_FILE
set -x
# shellcheck disable=SC2012
OLDEST_FILE="$(ls -t "/mnt/docker-aio-config/session/" | tail -1)"
rm "/mnt/docker-aio-config/session/$OLDEST_FILE"
set +x
done
sleep 5
done done

307
Containers/mastercontainer/start.sh Normal file → Executable file
View file

@ -6,12 +6,6 @@ print_green() {
printf "%b%s%b\n" "\e[0;92m" "$TEXT" "\e[0m" printf "%b%s%b\n" "\e[0;92m" "$TEXT" "\e[0m"
} }
# Function to show text in red
print_red() {
local TEXT="$1"
printf "%b%s%b\n" "\e[0;31m" "$TEXT" "\e[0m"
}
# Function to check if number was provided # Function to check if number was provided
check_if_number() { check_if_number() {
case "${1}" in case "${1}" in
@ -20,42 +14,18 @@ case "${1}" in
esac esac
} }
# Check if running as root user
if [ "$EUID" != "0" ]; then
print_red "Container does not run as root user. This is not supported."
exit 1
fi
# Check that the CMD is not overwritten nor set
if [ "$*" != "" ]; then
print_red "Docker run command for AIO is incorrect as a CMD option was given which is not expected."
exit 1
fi
# Check if socket is available and readable # Check if socket is available and readable
if ! [ -e "/var/run/docker.sock" ]; then if ! [ -a "/var/run/docker.sock" ]; then
print_red "Docker socket is not available. Cannot continue." echo "Docker socket is not available. Cannot continue."
echo "Please make sure to mount the docker socket into /var/run/docker.sock inside the container!"
echo "If you did this by purpose because you don't want the container to have access to the docker socket, see https://github.com/nextcloud/all-in-one/tree/main/manual-install."
echo "And https://github.com/nextcloud/all-in-one/blob/main/manual-install/latest.yml"
exit 1 exit 1
elif ! mountpoint -q "/mnt/docker-aio-config"; then elif ! mountpoint -q "/mnt/docker-aio-config"; then
print_red "/mnt/docker-aio-config is not a mountpoint. Cannot proceed!" echo "/mnt/docker-aio-config is not a mountpoint. Cannot proceed!"
echo "Please make sure to mount the nextcloud_aio_mastercontainer docker volume into /mnt/docker-aio-config inside the container!"
echo "If you are on TrueNas SCALE, see https://github.com/nextcloud/all-in-one#can-i-run-aio-on-truenas-scale"
exit 1 exit 1
elif mountpoint -q /var/www/docker-aio/php/containers.json; then elif ! sudo -u www-data test -r /var/run/docker.sock; then
print_red "/var/www/docker-aio/php/containers.json is a mountpoint. Cannot proceed!"
echo "This is a not-supported customization of the mastercontainer!"
echo "Please remove this bind-mount from the mastercontainer."
echo "If you need to customize things, feel free to use https://github.com/nextcloud/all-in-one/tree/main/manual-install"
echo "See https://github.com/nextcloud/all-in-one/blob/main/manual-install/latest.yml"
exit 1
elif ! sudo -E -u www-data test -r /var/run/docker.sock; then
echo "Trying to fix docker.sock permissions internally..." echo "Trying to fix docker.sock permissions internally..."
DOCKER_GROUP=$(stat -c '%G' /var/run/docker.sock) DOCKER_GROUP=$(stat -c '%G' /var/run/docker.sock)
DOCKER_GROUP_ID=$(stat -c '%g' /var/run/docker.sock) DOCKER_GROUP_ID=$(stat -c '%g' /var/run/docker.sock)
# Check if a group with the same group name of /var/run/docker.socket already exists in the container # Check if a group with the same group id of /var/run/docker.socket already exists in the container
if grep -q "^$DOCKER_GROUP:" /etc/group; then if grep -q "^$DOCKER_GROUP:" /etc/group; then
# If yes, add www-data to that group # If yes, add www-data to that group
echo "Adding internal www-data to group $DOCKER_GROUP" echo "Adding internal www-data to group $DOCKER_GROUP"
@ -69,68 +39,25 @@ elif ! sudo -E -u www-data test -r /var/run/docker.sock; then
groupadd -g "$DOCKER_GROUP_ID" docker groupadd -g "$DOCKER_GROUP_ID" docker
usermod -aG docker www-data usermod -aG docker www-data
fi fi
if ! sudo -E -u www-data test -r /var/run/docker.sock; then if ! sudo -u www-data test -r /var/run/docker.sock; then
print_red "Docker socket is not readable by the www-data user. Cannot continue." echo "Docker socket is not readable by the www-data user. Cannot continue."
exit 1 exit 1
fi fi
fi fi
# Get default docker api version # Check if api version is supported
API_VERSION_FILE="$(find ./ -name DockerActionManager.php | head -1)" if ! sudo -u www-data docker info &>/dev/null; then
API_VERSION="$(grep -oP 'const string API_VERSION.*\;' "$API_VERSION_FILE" | grep -oP '[0-9]+.[0-9]+' | head -1)" echo "Cannot connect to the docker socket. Cannot proceed."
if [ -z "$API_VERSION" ]; then
print_red "Could not get API_VERSION. Something is wrong!"
exit 1 exit 1
fi fi
API_VERSION_FILE="$(find ./ -name DockerActionManager.php | head -1)"
# Check if DOCKER_API_VERSION is set globally API_VERSION="$(grep -oP 'const API_VERSION.*\;' "$API_VERSION_FILE" | grep -oP '[0-9]+.[0-9]+' | head -1)"
if [ -n "$DOCKER_API_VERSION" ]; then
if ! echo "$DOCKER_API_VERSION" | grep -q '^[0-9].[0-9]\+$'; then
print_red "You've set DOCKER_API_VERSION but not to an allowed value.
The string must be a version number like e.g. '1.44'.
It is set to '$DOCKER_API_VERSION'."
exit 1
fi
print_red "DOCKER_API_VERSION was found to be set to '$DOCKER_API_VERSION'."
print_red "Please note that only v$API_VERSION is officially supported and tested by the maintainers of Nextcloud AIO."
print_red "So you run on your own risk and things might break without warning."
else
# Export docker api version to use it everywhere
export DOCKER_API_VERSION="$API_VERSION"
fi
# Set a fallback docker api version. Needed for api version check.
# The check will not work otherwise on old docker versions
FALLBACK_DOCKER_API_VERSION="1.41"
# Check if docker info can be used
if ! sudo -E -u www-data docker info &>/dev/null; then
if ! sudo -E -u www-data DOCKER_API_VERSION="$FALLBACK_DOCKER_API_VERSION" docker info &>/dev/null; then
print_red "Cannot connect to the docker socket. Cannot proceed."
echo "Did you maybe remove group read permissions for the docker socket? AIO needs them in order to access the docker socket."
echo "If SELinux is enabled on your host, see https://github.com/nextcloud/all-in-one#are-there-known-problems-when-selinux-is-enabled"
echo "If you are on TrueNas SCALE, see https://github.com/nextcloud/all-in-one#can-i-run-aio-on-truenas-scale"
echo "On macOS, see https://github.com/nextcloud/all-in-one#how-to-run-aio-on-macos"
echo "Another possibility might be that Docker api v$API_VERSION is not supported by your docker daemon."
echo "In that case, you should report this to https://github.com/nextcloud/all-in-one/issues"
echo ""
exit 1
fi
fi
# Docker api version check
# shellcheck disable=SC2001 # shellcheck disable=SC2001
API_VERSION_NUMB="$(echo "$DOCKER_API_VERSION" | sed 's/\.//')" API_VERSION_NUMB="$(echo "$API_VERSION" | sed 's/\.//')"
LOCAL_API_VERSION_NUMB="$(sudo -E -u www-data docker version | grep -i "api version" | grep -oP '[0-9]+.[0-9]+' | head -1 | sed 's/\.//')" LOCAL_API_VERSION_NUMB="$(sudo -u www-data docker version | grep -i "api version" | grep -oP '[0-9]+.[0-9]+' | head -1 | sed 's/\.//')"
if [ -z "$LOCAL_API_VERSION_NUMB" ]; then
LOCAL_API_VERSION_NUMB="$(sudo -E -u www-data DOCKER_API_VERSION="$FALLBACK_DOCKER_API_VERSION" docker version | grep -i "api version" | grep -oP '[0-9]+.[0-9]+' | head -1 | sed 's/\.//')"
fi
if [ -n "$LOCAL_API_VERSION_NUMB" ] && [ -n "$API_VERSION_NUMB" ]; then if [ -n "$LOCAL_API_VERSION_NUMB" ] && [ -n "$API_VERSION_NUMB" ]; then
if ! [ "$LOCAL_API_VERSION_NUMB" -ge "$API_VERSION_NUMB" ]; then if ! [ "$LOCAL_API_VERSION_NUMB" -ge "$API_VERSION_NUMB" ]; then
print_red "Docker API v$DOCKER_API_VERSION is not supported by your docker engine. Cannot proceed. Please upgrade your docker engine if you want to run Nextcloud AIO!" echo "Docker API v$API_VERSION is not supported by your docker engine. Cannot proceed. Please upgrade your docker engine if you want to run Nextcloud AIO!"
echo "Alternatively, set the DOCKER_API_VERSION environmental variable to a compatible version."
echo "However please note that only v$API_VERSION is officially supported and tested by the maintainers of Nextcloud AIO."
echo "See https://github.com/nextcloud/all-in-one#how-to-adjust-the-internally-used-docker-api-version"
exit 1 exit 1
fi fi
else else
@ -138,71 +65,48 @@ else
sleep 10 sleep 10
fi fi
# Check Storage drivers
STORAGE_DRIVER="$(sudo -E -u www-data docker info | grep "Storage Driver")"
# Check if vfs is used: https://github.com/nextcloud/all-in-one/discussions/1467
if echo "$STORAGE_DRIVER" | grep -q vfs; then
echo "$STORAGE_DRIVER"
print_red "Warning: It seems like the storage driver vfs is used. This will lead to problems with disk space and performance and is disrecommended!"
elif echo "$STORAGE_DRIVER" | grep -q fuse-overlayfs; then
echo "$STORAGE_DRIVER"
print_red "Warning: It seems like the storage driver fuse-overlayfs is used. Please check if you can switch to overlay2 instead."
fi
# Check if snap install
if sudo -E -u www-data docker info | grep "Docker Root Dir" | grep "/var/snap/docker/"; then
print_red "Warning: It looks like your installation uses docker installed via snap."
print_red "This comes with some limitations and is disrecommended by the docker maintainers."
print_red "See for example https://github.com/nextcloud/all-in-one/discussions/4890#discussioncomment-10386752"
fi
# Check if startup command was executed correctly # Check if startup command was executed correctly
if ! sudo -E -u www-data docker ps --format "{{.Names}}" | grep -q "^nextcloud-aio-mastercontainer$"; then if ! sudo -u www-data docker ps | grep -q "nextcloud-aio-mastercontainer"; then
print_red "It seems like you did not give the mastercontainer the correct name? (The 'nextcloud-aio-mastercontainer' container was not found.) echo "It seems like you did not give the mastercontainer the correct name?
Using a different name is not supported since mastercontainer updates will not work in that case! Using a different name is not supported!"
If you are on docker swarm and try to run AIO, see https://github.com/nextcloud/all-in-one#can-i-run-this-with-docker-swarm"
exit 1 exit 1
elif ! sudo -E -u www-data docker volume ls --format "{{.Name}}" | grep -q "^nextcloud_aio_mastercontainer$"; then elif ! sudo -u www-data docker volume ls | grep -q "nextcloud_aio_mastercontainer"; then
print_red "It seems like you did not give the mastercontainer volume the correct name? (The 'nextcloud_aio_mastercontainer' volume was not found.) echo "It seems like you did not give the mastercontainer volume the correct name?
Using a different name is not supported since the built-in backup solution will not work in that case!" Using a different name is not supported!"
exit 1
elif ! sudo -E -u www-data docker inspect nextcloud-aio-mastercontainer --format '{{.Mounts}}' | grep -q " nextcloud_aio_mastercontainer "; then
print_red "It seems like you did not attach the 'nextcloud_aio_mastercontainer' volume to the mastercontainer?
This is not supported since the built-in backup solution will not work in that case!"
exit 1 exit 1
fi fi
# Check for other options # Check for other options
if [ -n "$NEXTCLOUD_DATADIR" ]; then if [ -n "$NEXTCLOUD_DATADIR" ]; then
if [ "$NEXTCLOUD_DATADIR" = "nextcloud_aio_nextcloud_datadir" ]; then if [ "$NEXTCLOUD_DATADIR" = "nextcloud_aio_nextcloud_datadir" ]; then
sleep 1 echo "NEXTCLOUD_DATADIR is set to $NEXTCLOUD_DATADIR"
elif ! echo "$NEXTCLOUD_DATADIR" | grep -q "^/" || [ "$NEXTCLOUD_DATADIR" = "/" ]; then elif ! echo "$NEXTCLOUD_DATADIR" | grep -q "^/" || [ "$NEXTCLOUD_DATADIR" = "/" ]; then
print_red "You've set NEXTCLOUD_DATADIR but not to an allowed value. echo "You've set NEXTCLOUD_DATADIR but not to an allowed value.
The string must start with '/' and must not be equal to '/'. Also allowed is 'nextcloud_aio_nextcloud_datadir'. The string must start with '/' and must not be equal to '/'.
It is set to '$NEXTCLOUD_DATADIR'." It is set to '$NEXTCLOUD_DATADIR'."
exit 1 exit 1
fi fi
fi fi
if [ -n "$NEXTCLOUD_MOUNT" ]; then if [ -n "$NEXTCLOUD_MOUNT" ]; then
if ! echo "$NEXTCLOUD_MOUNT" | grep -q "^/" || [ "$NEXTCLOUD_MOUNT" = "/" ]; then if ! echo "$NEXTCLOUD_MOUNT" | grep -q "^/" || [ "$NEXTCLOUD_MOUNT" = "/" ]; then
print_red "You've set NEXTCLOUD_MOUNT but not to an allowed value. echo "You've set NEXCLOUD_MOUNT but not to an allowed value.
The string must start with '/' and must not be equal to '/'. The string must start with '/' and must not be equal to '/'.
It is set to '$NEXTCLOUD_MOUNT'." It is set to '$NEXTCLOUD_MOUNT'."
exit 1 exit 1
elif [ "$NEXTCLOUD_MOUNT" = "/mnt/ncdata" ] || echo "$NEXTCLOUD_MOUNT" | grep -q "^/mnt/ncdata/"; then elif [ "$NEXTCLOUD_MOUNT" = "/mnt/ncdata" ] || echo "$NEXTCLOUD_MOUNT" | grep -q "^/mnt/ncdata/"; then
print_red "'/mnt/ncdata' and '/mnt/ncdata/' are not allowed as values for NEXTCLOUD_MOUNT." echo "'/mnt/ncdata' and '/mnt/ncdata/' are not allowed as values for NEXTCLOUD_MOUNT."
exit 1 exit 1
fi fi
fi fi
if [ -n "$NEXTCLOUD_DATADIR" ] && [ -n "$NEXTCLOUD_MOUNT" ]; then if [ -n "$NEXTCLOUD_DATADIR" ] && [ -n "$NEXTCLOUD_MOUNT" ]; then
if [ "$NEXTCLOUD_DATADIR" = "$NEXTCLOUD_MOUNT" ]; then if [ "$NEXTCLOUD_DATADIR" = "$NEXTCLOUD_MOUNT" ]; then
print_red "NEXTCLOUD_DATADIR and NEXTCLOUD_MOUNT are not allowed to be equal." echo "NEXTCLOUD_DATADIR and NEXTCLOUD_MOUNT are not allowed to be equal."
exit 1 exit 1
fi fi
fi fi
if [ -n "$NEXTCLOUD_UPLOAD_LIMIT" ]; then if [ -n "$NEXTCLOUD_UPLOAD_LIMIT" ]; then
if ! echo "$NEXTCLOUD_UPLOAD_LIMIT" | grep -q '^[0-9]\+G$'; then if ! echo "$NEXTCLOUD_UPLOAD_LIMIT" | grep -q '^[0-9]\+G$'; then
print_red "You've set NEXTCLOUD_UPLOAD_LIMIT but not to an allowed value. echo "You've set NEXTCLOUD_UPLOAD_LIMIT but not to an allowed value.
The string must start with a number and end with 'G'. The string must start with a number and end with 'G'.
It is set to '$NEXTCLOUD_UPLOAD_LIMIT'." It is set to '$NEXTCLOUD_UPLOAD_LIMIT'."
exit 1 exit 1
@ -210,153 +114,72 @@ It is set to '$NEXTCLOUD_UPLOAD_LIMIT'."
fi fi
if [ -n "$NEXTCLOUD_MAX_TIME" ]; then if [ -n "$NEXTCLOUD_MAX_TIME" ]; then
if ! echo "$NEXTCLOUD_MAX_TIME" | grep -q '^[0-9]\+$'; then if ! echo "$NEXTCLOUD_MAX_TIME" | grep -q '^[0-9]\+$'; then
print_red "You've set NEXTCLOUD_MAX_TIME but not to an allowed value. echo "You've set NEXTCLOUD_MAX_TIME but not to an allowed value.
The string must be a number. E.g. '3600'. The string must be a number. E.g. '3600'.
It is set to '$NEXTCLOUD_MAX_TIME'." It is set to '$NEXTCLOUD_MAX_TIME'."
exit 1 exit 1
fi fi
fi fi
if [ -n "$NEXTCLOUD_MEMORY_LIMIT" ]; then
if ! echo "$NEXTCLOUD_MEMORY_LIMIT" | grep -q '^[0-9]\+M$'; then
print_red "You've set NEXTCLOUD_MEMORY_LIMIT but not to an allowed value.
The string must start with a number and end with 'M'.
It is set to '$NEXTCLOUD_MEMORY_LIMIT'."
exit 1
fi
fi
if [ -n "$APACHE_PORT" ]; then if [ -n "$APACHE_PORT" ]; then
if ! check_if_number "$APACHE_PORT"; then if ! check_if_number "$APACHE_PORT"; then
print_red "You provided an Apache port but did not only use numbers. echo "You provided an Apache port but did not only use numbers.
It is set to '$APACHE_PORT'." It is set to '$APACHE_PORT'."
exit 1 exit 1
elif ! [ "$APACHE_PORT" -le 65535 ] || ! [ "$APACHE_PORT" -ge 1 ]; then elif ! [ "$APACHE_PORT" -le 65535 ] || ! [ "$APACHE_PORT" -ge 1 ]; then
print_red "The provided Apache port is invalid. It must be between 1 and 65535" echo "The provided Apache port is invalid. It must be between 1 and 65535"
exit 1 exit 1
fi fi
fi fi
if [ -n "$APACHE_IP_BINDING" ]; then if [ -n "$APACHE_IP_BINDING" ]; then
if ! echo "$APACHE_IP_BINDING" | grep -q '^[0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+$\|^[0-9a-f:]\+$\|^@INTERNAL$'; then if ! echo "$APACHE_IP_BINDING" | grep -q '^[0-9.]\+$'; then
print_red "You provided an ip-address for the apache container's ip-binding but it was not a valid ip-address. echo "You provided an ip-address for the apache container's ip-binding but it was not a valid ip-address.
It is set to '$APACHE_IP_BINDING'." It is set to '$APACHE_IP_BINDING'."
exit 1 exit 1
fi fi
fi fi
if [ -n "$APACHE_ADDITIONAL_NETWORK" ]; then
if ! echo "$APACHE_ADDITIONAL_NETWORK" | grep -q "^[a-zA-Z0-9._-]\+$"; then
print_red "You've set APACHE_ADDITIONAL_NETWORK but not to an allowed value.
It needs to be a string with letters, numbers, hyphens and underscores.
It is set to '$APACHE_ADDITIONAL_NETWORK'."
exit 1
fi
fi
if [ -n "$TALK_PORT" ]; then if [ -n "$TALK_PORT" ]; then
if ! check_if_number "$TALK_PORT"; then if ! check_if_number "$TALK_PORT"; then
print_red "You provided an Talk port but did not only use numbers. echo "You provided an Talk port but did not only use numbers.
It is set to '$TALK_PORT'." It is set to '$TALK_PORT'."
exit 1 exit 1
elif ! [ "$TALK_PORT" -le 65535 ] || ! [ "$TALK_PORT" -ge 1 ]; then elif ! [ "$TALK_PORT" -le 65535 ] || ! [ "$TALK_PORT" -ge 1 ]; then
print_red "The provided Talk port is invalid. It must be between 1 and 65535" echo "The provided Talk port is invalid. It must be between 1 and 65535"
exit 1 exit 1
fi fi
fi fi
if [ -n "$APACHE_PORT" ] && [ -n "$TALK_PORT" ]; then if [ -n "$APACHE_PORT" ] && [ -n "$TALK_PORT" ]; then
if [ "$APACHE_PORT" = "$TALK_PORT" ]; then if [ "$APACHE_PORT" = "$TALK_PORT" ]; then
print_red "APACHE_PORT and TALK_PORT are not allowed to be equal." echo "APACHE_PORT and TALK_PORT are not allowed to be equal."
exit 1 exit 1
fi fi
fi fi
if [ -n "$WATCHTOWER_DOCKER_SOCKET_PATH" ]; then if [ -n "$DOCKER_SOCKET_PATH" ]; then
if ! echo "$WATCHTOWER_DOCKER_SOCKET_PATH" | grep -q "^/" || echo "$WATCHTOWER_DOCKER_SOCKET_PATH" | grep -q "/$"; then if ! echo "$DOCKER_SOCKET_PATH" | grep -q "^/" || echo "$DOCKER_SOCKET_PATH" | grep -q "/$"; then
print_red "You've set WATCHTOWER_DOCKER_SOCKET_PATH but not to an allowed value. echo "You've set DOCKER_SOCKET_PATH but not to an allowed value.
The string must start with '/' and must not end with '/'. The string must start with '/' and must not end with '/'.
It is set to '$WATCHTOWER_DOCKER_SOCKET_PATH'." It is set to '$DOCKER_SOCKET_PATH'."
exit 1 exit 1
fi fi
fi fi
if [ -n "$NEXTCLOUD_TRUSTED_CACERTS_DIR" ]; then if [ -n "$TRUSTED_CACERTS_DIR" ]; then
if ! echo "$NEXTCLOUD_TRUSTED_CACERTS_DIR" | grep -q "^/" || echo "$NEXTCLOUD_TRUSTED_CACERTS_DIR" | grep -q "/$"; then if ! echo "$TRUSTED_CACERTS_DIR" | grep -q "^/" || echo "$TRUSTED_CACERTS_DIR" | grep -q "/$"; then
print_red "You've set NEXTCLOUD_TRUSTED_CACERTS_DIR but not to an allowed value. echo "You've set TRUSTED_CACERTS_DIR but not to an allowed value.
It should be an absolute path to a directory that starts with '/' but not end with '/'. It should be an absolute path to a directory that starts with '/' but not end with '/'.
It is set to '$NEXTCLOUD_TRUSTED_CACERTS_DIR '." It is set to '$TRUSTED_CACERTS_DIR '."
exit 1 exit 1
fi fi
fi fi
if [ -n "$NEXTCLOUD_STARTUP_APPS" ]; then
if ! echo "$NEXTCLOUD_STARTUP_APPS" | grep -q "^[a-z0-9 _-]\+$"; then
print_red "You've set NEXTCLOUD_STARTUP_APPS but not to an allowed value.
It needs to be a string. Allowed are small letters a-z, 0-9, spaces, hyphens and '_'.
It is set to '$NEXTCLOUD_STARTUP_APPS'."
exit 1
fi
fi
if [ -n "$NEXTCLOUD_ADDITIONAL_APKS" ]; then
if ! echo "$NEXTCLOUD_ADDITIONAL_APKS" | grep -q "^[a-z0-9 ._-]\+$"; then
print_red "You've set NEXTCLOUD_ADDITIONAL_APKS but not to an allowed value.
It needs to be a string. Allowed are small letters a-z, digits 0-9, spaces, hyphens, dots and '_'.
It is set to '$NEXTCLOUD_ADDITIONAL_APKS'."
exit 1
fi
fi
if [ -n "$NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS" ]; then
if ! echo "$NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS" | grep -q "^[a-z0-9 ._-]\+$"; then
print_red "You've set NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS but not to an allowed value.
It needs to be a string. Allowed are small letters a-z, digits 0-9, spaces, hyphens, dots and '_'.
It is set to '$NEXTCLOUD_ADDITIONAL_PHP_EXTENSIONS'."
exit 1
fi
fi
if [ -n "$AIO_COMMUNITY_CONTAINERS" ]; then
print_red "You've set AIO_COMMUNITY_CONTAINERS but the option was removed.
The community containers get managed via the AIO interface now."
fi
# Check if ghcr.io is reachable # Check DNS resolution
# Solves issues like https://github.com/nextcloud/all-in-one/discussions/5268 # Prevents issues like https://github.com/nextcloud/all-in-one/discussions/565
if ! curl --no-progress-meter https://ghcr.io/v2/ >/dev/null; then curl https://nextcloud.com &>/dev/null
print_red "Could not reach https://ghcr.io." if [ "$?" = 6 ]; then
echo "Most likely is something blocking access to it." echo "Could not resolve the host nextcloud.com."
echo "You should be able to fix this by following https://dockerlabs.collabnix.com/intermediate/networking/Configuring_DNS.html" echo "Most likely the DNS resolving does not work."
echo "Another solution is using https://github.com/nextcloud/all-in-one/tree/main/manual-install" echo "You should be able to fix this by adding the '--dns=\"ip.address.of.dns.server\"' option to the docker run command."
echo "See https://github.com/nextcloud/all-in-one/blob/main/manual-install/latest.yml"
exit 1 exit 1
fi fi
# Check that no changes have been made to timezone settings since AIO only supports running in Etc/UTC timezone
if [ -n "$TZ" ]; then
print_red "The environmental variable TZ has been set which is not supported by AIO since it only supports running in the default Etc/UTC timezone!"
echo "The correct timezone can be set in the AIO interface later on!"
# Disable exit since it seems to be by default set on unraid and we dont want to break these instances
# exit 1
fi
# Check that http proxy or no_proxy variable is not set which AIO does not support
if [ -n "$HTTP_PROXY" ] || [ -n "$http_proxy" ] || [ -n "$HTTPS_PROXY" ] || [ -n "$https_proxy" ] || [ -n "$NO_PROXY" ] || [ -n "$no_proxy" ]; then
print_red "The environmental variable HTTP_PROXY, http_proxy, HTTPS_PROXY, https_proxy, NO_PROXY or no_proxy has been set which is not supported by AIO."
echo "If you need this, then you should use https://github.com/nextcloud/all-in-one/tree/main/manual-install"
echo "See https://github.com/nextcloud/all-in-one/blob/main/manual-install/latest.yml"
exit 1
fi
if mountpoint -q /etc/localtime; then
print_red "/etc/localtime has been mounted into the container which is not allowed because AIO only supports running in the default Etc/UTC timezone!"
echo "The correct timezone can be set in the AIO interface later on!"
exit 1
fi
if mountpoint -q /etc/timezone; then
print_red "/etc/timezone has been mounted into the container which is not allowed because AIO only supports running in the default Etc/UTC timezone!"
echo "The correct timezone can be set in the AIO interface later on!"
exit 1
fi
# Check if unsupported env are set (but don't exit as it would break many instances)
if [ -n "$APACHE_DISABLE_REWRITE_IP" ]; then
print_red "The environmental variable APACHE_DISABLE_REWRITE_IP has been set which is not supported by AIO. Please remove it!"
fi
if [ -n "$NEXTCLOUD_TRUSTED_DOMAINS" ]; then
print_red "The environmental variable NEXTCLOUD_TRUSTED_DOMAINS has been set which is not supported by AIO. Please remove it!"
fi
if [ -n "$TRUSTED_PROXIES" ]; then
print_red "The environmental variable TRUSTED_PROXIES has been set which is not supported by AIO. Please remove it!"
fi
# Add important folders # Add important folders
mkdir -p /mnt/docker-aio-config/data/ mkdir -p /mnt/docker-aio-config/data/
mkdir -p /mnt/docker-aio-config/session/ mkdir -p /mnt/docker-aio-config/session/
@ -373,8 +196,8 @@ chown root:root -R /mnt/docker-aio-config/certs/
# Don't allow access to the AIO interface from the Nextcloud container # Don't allow access to the AIO interface from the Nextcloud container
# Probably more cosmetic than anything but at least an attempt # Probably more cosmetic than anything but at least an attempt
if ! grep -q '# nextcloud-aio-block' /etc/apache2/httpd.conf; then if ! grep -q '# nextcloud-aio-block' /etc/apache2/apache2.conf; then
cat << APACHE_CONF >> /etc/apache2/httpd.conf cat << APACHE_CONF >> /etc/apache2/apache2.conf
# nextcloud-aio-block-start # nextcloud-aio-block-start
<Location /> <Location />
order allow,deny order allow,deny
@ -401,30 +224,14 @@ if [ -f ./ssl.crt ] && [ -f ./ssl.key ]; then
cp "$GENERATED_CERTS/ssl.key" ./ cp "$GENERATED_CERTS/ssl.key" ./
fi fi
print_green "Initial startup of Nextcloud All-in-One complete! print_green "Initial startup of Nextcloud All In One complete!
You should be able to open the Nextcloud AIO Interface now on port 8080 of this server! You should be able to open the Nextcloud AIO Interface now on port 8080 of this server!
E.g. https://internal.ip.of.this.server:8080 E.g. https://internal.ip.of.this.server:8080
⚠️ Important: always use an IP address (not a domain) when you access this port, as HSTS might otherwise block access to it later!
If your server has port 80 and 8443 open and you point a domain to your server, you can get a valid certificate automatically by opening the Nextcloud AIO Interface via: If your server has port 80 and 8443 open and you point a domain to your server, you can get a valid certificate automatically by opening the Nextcloud AIO Interface via:
https://your-domain-that-points-to-this-server.tld:8443" https://your-domain-that-points-to-this-server.tld:8443"
# Set the timezone to Etc/UTC # Set the timezone to UTC
export TZ=Etc/UTC export TZ=UTC
# Fix apache startup exec "$@"
rm -f /var/run/apache2/httpd.pid
# Fix caddy startup
if [ -d "/mnt/docker-aio-config/caddy/locks" ]; then
rm -rf /mnt/docker-aio-config/caddy/locks/*
fi
# Fix the Caddyfile format
caddy fmt --overwrite /Caddyfile
# Fix caddy log
chmod 777 /root
# Start supervisord
exec /usr/bin/supervisord -c /supervisord.conf

View file

@ -1,36 +1,26 @@
[supervisord] [supervisord]
nodaemon=true nodaemon=true
logfile=/var/log/supervisord/supervisord.log logfile=/var/log/supervisord/supervisord.log
pidfile=/var/run/supervisord/supervisord.pid pidfile=/var/run/supervisord/supervisord.pid
childlogdir=/var/log/supervisord/ childlogdir=/var/log/supervisord/
logfile_maxbytes=50MB logfile_maxbytes=50MB
logfile_backups=10 logfile_backups=10
loglevel=error loglevel=error
user=root
[program:php-fpm]
# Stdout logging is disabled as otherwise the logs are spammed
stdout_logfile=NONE
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
command=php-fpm
user=root
[program:apache] [program:apache]
# Stdout logging is disabled as otherwise the logs are spammed stdout_logfile=/dev/stdout
stdout_logfile=NONE stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0 stderr_logfile_maxbytes=0
command=httpd -DFOREGROUND command=apache2-foreground
user=root
[program:caddy] [program:caddy]
stdout_logfile=/dev/stdout stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0 stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0 stderr_logfile_maxbytes=0
command=/usr/bin/caddy run --config /Caddyfile command=sudo -u www-data /usr/bin/caddy run -config /Caddyfile
user=www-data
[program:cron] [program:cron]
stdout_logfile=/dev/stdout stdout_logfile=/dev/stdout
@ -38,7 +28,6 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0 stderr_logfile_maxbytes=0
command=/cron.sh command=/cron.sh
user=root
[program:backup-time-file-watcher] [program:backup-time-file-watcher]
stdout_logfile=/dev/stdout stdout_logfile=/dev/stdout
@ -46,7 +35,6 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0 stderr_logfile_maxbytes=0
command=/backup-time-file-watcher.sh command=/backup-time-file-watcher.sh
user=root
[program:session-deduplicator] [program:session-deduplicator]
stdout_logfile=/dev/stdout stdout_logfile=/dev/stdout
@ -54,11 +42,3 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0 stderr_logfile_maxbytes=0
command=/session-deduplicator.sh command=/session-deduplicator.sh
user=root
[program:domain-validator]
# Logging is disabled as otherwise all attempts will be logged which spams the logs
stdout_logfile=NONE
stderr_logfile=NONE
command=php -S 127.0.0.1:9876 /var/www/docker-aio/php/domain-validator.php
user=www-data

View file

@ -1,95 +1,72 @@
# syntax=docker/dockerfile:latest # From https://github.com/nextcloud/docker/blob/master/23/fpm-alpine/Dockerfile
FROM php:8.3.30-fpm-alpine3.23 FROM php:8.0.23-fpm-alpine3.16
ENV PHP_MEMORY_LIMIT=512M
ENV PHP_UPLOAD_LIMIT=16G
ENV PHP_MAX_TIME=3600
ENV SOURCE_LOCATION=/usr/src/nextcloud
ENV REDIS_DB_INDEX=0
# AIO settings start # Do not remove or change this line!
ENV NEXTCLOUD_VERSION=32.0.5
ENV AIO_TOKEN=123456
ENV AIO_URL=localhost
# AIO settings end # Do not remove or change this line!
COPY --chmod=775 Containers/nextcloud/*.sh /
COPY --chmod=774 Containers/nextcloud/upgrade.exclude /upgrade.exclude
COPY Containers/nextcloud/config/*.php /
COPY Containers/nextcloud/supervisord.conf /supervisord.conf
# AIO cloning start # Do not remove or change this line!
COPY app /usr/src/nextcloud/apps/nextcloud-aio
COPY Containers/nextcloud/root.motd /root.motd
# AIO cloning end # Do not remove or change this line!
VOLUME /mnt/ncdata
VOLUME /var/www/html
# Custom: change id of www-data user as it needs to be the same as on old installations # Custom: change id of www-data user as it needs to be the same as on old installations
# hadolint ignore=SC2086,DL3003
RUN set -ex; \ RUN set -ex; \
apk upgrade --no-cache -a; \
apk add --no-cache shadow; \ apk add --no-cache shadow; \
deluser www-data; \ deluser www-data; \
groupmod -g 333 xfs; \
usermod -u 333 -g 333 xfs; \
addgroup -g 33 -S www-data; \ addgroup -g 33 -S www-data; \
adduser -u 33 -D -S -G www-data www-data; \ adduser -u 33 -D -S -G www-data www-data
\
# entrypoint.sh and cron.sh dependencies # entrypoint.sh and cron.sh dependencies
RUN set -ex; \
\
apk add --no-cache \ apk add --no-cache \
rsync \ rsync \
; \ ;
# install the PHP extensions we need # install the PHP extensions we need
# see https://docs.nextcloud.com/server/stable/admin_manual/installation/source_installation.html # see https://docs.nextcloud.com/server/stable/admin_manual/installation/source_installation.html
ENV PHP_MEMORY_LIMIT 512M
ENV PHP_UPLOAD_LIMIT 10G
ENV PHP_MAX_TIME 3600
RUN set -ex; \
\
apk add --no-cache --virtual .build-deps \ apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \ $PHPIZE_DEPS \
autoconf \ autoconf \
freetype-dev \ freetype-dev \
gmp-dev \
icu-dev \ icu-dev \
imagemagick-dev \
imagemagick-svg \
imagemagick-heic \
imagemagick-tiff \
libevent-dev \ libevent-dev \
libjpeg-turbo-dev \ libjpeg-turbo-dev \
libmcrypt-dev \ libmcrypt-dev \
libmemcached-dev \
libpng-dev \ libpng-dev \
libwebp-dev \ libmemcached-dev \
libxml2-dev \ libxml2-dev \
libzip-dev \ libzip-dev \
openldap-dev \ openldap-dev \
pcre-dev \ pcre-dev \
postgresql-dev \ postgresql-dev \
imagemagick-dev \
libwebp-dev \
gmp-dev \
; \ ; \
\ \
docker-php-ext-configure gd --with-freetype --with-jpeg --with-webp; \ docker-php-ext-configure gd --with-freetype --with-jpeg --with-webp; \
docker-php-ext-configure ftp --with-openssl-dir=/usr; \
docker-php-ext-configure ldap; \ docker-php-ext-configure ldap; \
docker-php-ext-install -j "$(nproc)" \ docker-php-ext-install -j "$(nproc)" \
bcmath \ bcmath \
exif \ exif \
gd \ gd \
gmp \
intl \ intl \
ldap \ ldap \
opcache \ opcache \
pcntl \ pcntl \
pdo_mysql \
pdo_pgsql \ pdo_pgsql \
sysvsem \
zip \ zip \
gmp \
; \ ; \
\ \
# pecl will claim success even if one install fails, so we need to perform each install separately # pecl will claim success even if one install fails, so we need to perform each install separately
pecl install -o igbinary-3.2.16; \ pecl install APCu-5.1.22; \
pecl install APCu-5.1.28; \ pecl install memcached-3.2.0; \
pecl install -D 'enable-memcached-igbinary="yes"' memcached-3.4.0; \ pecl install redis-5.3.7; \
pecl install -oD 'enable-redis-igbinary="yes" enable-redis-zstd="yes" enable-redis-lz4="yes"' redis-6.3.0; \ pecl install imagick-3.7.0; \
pecl install -o imagick-3.8.1; \
\ \
docker-php-ext-enable \ docker-php-ext-enable \
igbinary \
apcu \ apcu \
memcached \ memcached \
redis \ redis \
@ -103,61 +80,43 @@ RUN set -ex; \
| sort -u \ | sort -u \
| awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \ | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
)"; \ )"; \
apk add --no-cache --virtual .nextcloud-phpext-rundeps $runDeps; \ apk add --virtual .nextcloud-phpext-rundeps $runDeps; \
apk del .build-deps; \ apk del .build-deps
\
{ \
echo 'apc.serializer=igbinary'; \
echo 'session.serialize_handler=igbinary'; \
} >> /usr/local/etc/php/conf.d/docker-php-ext-igbinary.ini; \
\
# set recommended PHP.ini settings # set recommended PHP.ini settings
# see https://docs.nextcloud.com/server/stable/admin_manual/installation/server_tuning.html#enable-php-opcache and below # see https://docs.nextcloud.com/server/stable/admin_manual/configuration_server/server_tuning.html#enable-php-opcache
{ \ RUN { \
echo 'opcache.max_accelerated_files=10000'; \ echo 'opcache.interned_strings_buffer=32'; \
echo 'opcache.memory_consumption=256'; \
echo 'opcache.interned_strings_buffer=64'; \
echo 'opcache.save_comments=1'; \ echo 'opcache.save_comments=1'; \
echo 'opcache.revalidate_freq=60'; \ echo 'opcache.revalidate_freq=60'; \
echo 'opcache.jit=1255'; \
echo 'opcache.jit_buffer_size=8M'; \
} > /usr/local/etc/php/conf.d/opcache-recommended.ini; \ } > /usr/local/etc/php/conf.d/opcache-recommended.ini; \
\ \
{ \ echo 'apc.enable_cli=1' >> /usr/local/etc/php/conf.d/docker-php-ext-apcu.ini; \
echo 'apc.enable_cli=1'; \
echo 'apc.shm_size=64M'; \
} >> /usr/local/etc/php/conf.d/docker-php-ext-apcu.ini; \
\ \
{ \ { \
echo 'memory_limit=${PHP_MEMORY_LIMIT}'; \ echo 'memory_limit=${PHP_MEMORY_LIMIT}'; \
echo 'upload_max_filesize=${PHP_UPLOAD_LIMIT}'; \ echo 'upload_max_filesize=${PHP_UPLOAD_LIMIT}'; \
echo 'post_max_size=${PHP_UPLOAD_LIMIT}'; \ echo 'post_max_size=${PHP_UPLOAD_LIMIT}'; \
echo 'max_execution_time=${PHP_MAX_TIME}'; \ echo 'max_execution_time=${PHP_MAX_TIME}'; \
echo 'max_input_time=-1'; \ echo 'max_input_time=${PHP_MAX_TIME}'; \
echo 'default_socket_timeout=${PHP_MAX_TIME}'; \
} > /usr/local/etc/php/conf.d/nextcloud.ini; \ } > /usr/local/etc/php/conf.d/nextcloud.ini; \
\ \
{ \ mkdir /var/www/data; \
echo 'session.save_handler = redis'; \
echo 'session.save_path = "tcp://${REDIS_HOST}:${REDIS_PORT}?database=${REDIS_DB_INDEX}${REDIS_USER_AUTH}&auth[]=${REDIS_HOST_PASSWORD}"'; \
echo 'redis.session.locking_enabled = 1'; \
echo 'redis.session.lock_retries = -1'; \
echo 'redis.session.lock_wait_time = 10000'; \
echo 'session.gc_maxlifetime = 86400'; \
} > /usr/local/etc/php/conf.d/redis-session.ini; \
\
mkdir -p /var/www/data; \
chown -R www-data:root /var/www; \ chown -R www-data:root /var/www; \
chmod -R g=u /var/www; \ chmod -R g=u /var/www
\
# Download Nextcloud archive start # Do not remove or change this line! VOLUME /var/www/html
ENV NEXTCLOUD_VERSION 24.0.5
RUN set -ex; \
apk add --no-cache --virtual .fetch-deps \ apk add --no-cache --virtual .fetch-deps \
bzip2 \ bzip2 \
gnupg \ gnupg \
; \ ; \
\ \
curl -fsSL -o nextcloud.tar.bz2 \ curl -fsSL -o nextcloud.tar.bz2 \
"https://github.com/nextcloud-releases/server/releases/download/v${NEXTCLOUD_VERSION}/nextcloud-${NEXTCLOUD_VERSION}.tar.bz2"; \ "https://download.nextcloud.com/server/releases/nextcloud-${NEXTCLOUD_VERSION}.tar.bz2"; \
curl -fsSL -o nextcloud.tar.bz2.asc \ curl -fsSL -o nextcloud.tar.bz2.asc \
"https://download.nextcloud.com/server/releases/nextcloud-${NEXTCLOUD_VERSION}.tar.bz2.asc"; \ "https://download.nextcloud.com/server/releases/nextcloud-${NEXTCLOUD_VERSION}.tar.bz2.asc"; \
export GNUPGHOME="$(mktemp -d)"; \ export GNUPGHOME="$(mktemp -d)"; \
@ -167,22 +126,32 @@ RUN set -ex; \
tar -xjf nextcloud.tar.bz2 -C /usr/src/; \ tar -xjf nextcloud.tar.bz2 -C /usr/src/; \
gpgconf --kill all; \ gpgconf --kill all; \
rm nextcloud.tar.bz2.asc nextcloud.tar.bz2; \ rm nextcloud.tar.bz2.asc nextcloud.tar.bz2; \
rm -rf "$GNUPGHOME" /usr/src/nextcloud/updater; \
mkdir -p /usr/src/nextcloud/data; \ mkdir -p /usr/src/nextcloud/data; \
mkdir -p /usr/src/nextcloud/custom_apps; \ mkdir -p /usr/src/nextcloud/custom_apps; \
chmod +x /usr/src/nextcloud/occ; \ chmod +x /usr/src/nextcloud/occ; \
mkdir -p /usr/src/nextcloud/config; \ apk del .fetch-deps
apk del .fetch-deps; \
# Download Nextcloud archive end # Do not remove or change this line! COPY *.sh upgrade.exclude /
mv /*.php /usr/src/nextcloud/config/; \ COPY config/* /usr/src/nextcloud/config/
\
ENTRYPOINT ["/entrypoint.sh"]
CMD ["php-fpm"]
# Template from https://github.com/nextcloud/docker/blob/master/.examples/dockerfiles/full/fpm-alpine/Dockerfile # Template from https://github.com/nextcloud/docker/blob/master/.examples/dockerfiles/full/fpm-alpine/Dockerfile
RUN set -ex; \
\
apk add --no-cache \ apk add --no-cache \
ffmpeg \ ffmpeg \
imagemagick \
procps \ procps \
samba-client \ samba-client \
supervisor \ supervisor \
# libreoffice \ # libreoffice \
; \ ;
RUN set -ex; \
\ \
apk add --no-cache --virtual .build-deps \ apk add --no-cache --virtual .build-deps \
$PHPIZE_DEPS \ $PHPIZE_DEPS \
@ -191,15 +160,12 @@ RUN set -ex; \
openssl-dev \ openssl-dev \
samba-dev \ samba-dev \
bzip2-dev \ bzip2-dev \
libpq-dev \
; \ ; \
\ \
docker-php-ext-configure imap --with-kerberos --with-imap-ssl; \ docker-php-ext-configure imap --with-kerberos --with-imap-ssl; \
docker-php-ext-install \ docker-php-ext-install \
bz2 \ bz2 \
imap \ imap \
pgsql \
ftp \
; \ ; \
pecl install smbclient; \ pecl install smbclient; \
docker-php-ext-enable smbclient; \ docker-php-ext-enable smbclient; \
@ -210,15 +176,22 @@ RUN set -ex; \
| sort -u \ | sort -u \
| awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \ | awk 'system("[ -e /usr/local/lib/" $1 " ]") == 0 { next } { print "so:" $1 }' \
)"; \ )"; \
apk add --no-cache --virtual .nextcloud-phpext-rundeps $runDeps; \ apk add --virtual .nextcloud-phpext-rundeps $runDeps; \
apk del .build-deps; \ apk del .build-deps
\
mkdir -p \ RUN mkdir -p \
/var/log/supervisord \ /var/log/supervisord \
/var/run/supervisord \ /var/run/supervisord \
; \ ;
chmod 777 -R /var/log/supervisord; \
chmod 777 -R /var/run/supervisord; \ COPY supervisord.conf /
ENV NEXTCLOUD_UPDATE=1
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]
# Custom:
RUN set -ex; \
\ \
apk add --no-cache \ apk add --no-cache \
bash \ bash \
@ -228,41 +201,55 @@ RUN set -ex; \
git \ git \
postgresql-client \ postgresql-client \
tzdata \ tzdata \
sudo \ mawk \
grep \ ; \
nodejs \ rm -rf /var/lib/apt/lists/*
bind-tools \
imagemagick \ RUN set -ex; \
imagemagick-svg \
imagemagick-heic \
imagemagick-tiff \
coreutils; \
\
grep -q '^pm = dynamic' /usr/local/etc/php-fpm.d/www.conf; \ grep -q '^pm = dynamic' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's/^pm = dynamic/pm = ondemand/' /usr/local/etc/php-fpm.d/www.conf; \ sed -i 's/^pm = dynamic/pm = ondemand/' /usr/local/etc/php-fpm.d/www.conf; \
# Sync this with max db connections and MaxRequestWorkers sed -i 's/^pm.max_children =.*/pm.max_children = 80/' /usr/local/etc/php-fpm.d/www.conf; \
# We don't actually expect so many children but don't want to limit it artificially because people will report issues otherwise. sed -i 's/^pm.start_servers =.*/pm.start_servers = 2/' /usr/local/etc/php-fpm.d/www.conf; \
# Also children will usually be terminated again after the process is done due to the ondemand setting sed -i 's/^pm.min_spare_servers =.*/pm.min_spare_servers = 1/' /usr/local/etc/php-fpm.d/www.conf; \
sed -i 's/^pm.max_children =.*/pm.max_children = 5000/' /usr/local/etc/php-fpm.d/www.conf; \ sed -i 's/^pm.max_spare_servers =.*/pm.max_spare_servers = 3/' /usr/local/etc/php-fpm.d/www.conf
sed -i 's|access.log = /proc/self/fd/2|access.log = /proc/self/fd/1|' /usr/local/etc/php-fpm.d/docker.conf; \
\ RUN set -ex; \
echo "[ -n \"\$TERM\" ] && [ -f /root.motd ] && cat /root.motd" >> /root/.bashrc; \ rm -rf /tmp/nextcloud-aio && \
\ mkdir -p /tmp/nextcloud-aio && \
cd /tmp/nextcloud-aio && \
git clone https://github.com/nextcloud-releases/all-in-one.git --depth 1 .; \
mkdir -p /usr/src/nextcloud/apps/nextcloud-aio; \
cp -r ./app/* /usr/src/nextcloud/apps/nextcloud-aio/
RUN set -ex; \
chown www-data:root -R /usr/src && \ chown www-data:root -R /usr/src && \
chmod 777 -R /usr/local/etc/php/conf.d && \ chown www-data:root -R /usr/local/etc/php/conf.d && \
chmod 777 -R /usr/local/etc/php-fpm.d && \ chown www-data:root -R /usr/local/etc/php-fpm.d && \
chmod -R 777 /tmp; \ chown www-data:root -R /var/log/supervisord/ && \
chmod -R 777 /etc/openldap; \ chown www-data:root -R /var/run/supervisord/ && \
\ rm -r /usr/src/nextcloud/apps/updatenotification
mkdir -p /nc-updater; \
chmod -R 777 /nc-updater
# hadolint ignore=DL3002 COPY start.sh /
USER root COPY notify.sh /
RUN set -ex; \
chmod +x /start.sh && \
chmod +r /supervisord.conf && \
chmod +x /entrypoint.sh && \
chmod +r /upgrade.exclude && \
chmod +x /cron.sh && \
chmod +x /notify.sh && \
chmod +x /activate-collabora.sh
RUN set -ex; \
mkdir /mnt/ncdata; \
chown www-data:www-data /mnt/ncdata;
VOLUME /mnt/ncdata
# Give root a random password
RUN echo "root:$(openssl rand -base64 12)" | chpasswd
USER www-data
ENTRYPOINT ["/start.sh"] ENTRYPOINT ["/start.sh"]
CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"]
HEALTHCHECK CMD /healthcheck.sh HEALTHCHECK CMD (nc -z localhost 9000 && curl -skI localhost:7867) || exit 1
LABEL com.centurylinklabs.watchtower.enable="false" \
wud.watch="false" \
org.label-schema.vendor="Nextcloud"

View file

@ -1,35 +0,0 @@
# Nextcloud All-in-One ``nextcloud`` Container
This folder contains the OCI/Docker container definition, along with associated resources and configuration files, for building the `nextcloud` container as part of the [Nextcloud All-in-One](https://github.com/nextcloud/all-in-one) project. This container hosts PHP and the Nextcloud Server application.
## Overview
The Nextcloud container provides the core Nextcloud application environment, including the necessary dependencies and configuration for seamless integration into the All-in-One stack. The container hosts:
- The PHP SAPI/backend (php-fpm)
- Nextcloud background jobs and scheduled tasks, which are handled via cron
- Miscellaneous minor support services specific to AIO's Nextcloud deployment (health and exec)
## Contents
- **Dockerfile**: Instructions for building the Nextcloud container image.
- **Entrypoint script**: The `start.sh` script is used for container initialization and runtime configuration before starting supervisord.
- **Nextcloud configuration files**: Specific to running in a containerized setting and/or within AIO.
- **Supervisor**: The `supervisord.conf` file defines the long-running services hosted within the container (php-fpm, cron, etc.).
## Usage
This container is intended to be used as part of the All-in-One deployment and is not meant to be used on its own. Among other requirements, it needs a web server container (which AIO provides in a dedicated Apache container). It is designed to be orchestrated by the [All-in-One mastercontainer](https://github.com/nextcloud/all-in-one/tree/main/Containers/mastercontainer) or used within an [AIO Manual Installation](https://github.com/nextcloud/all-in-one/tree/main/manual-install) or [AIO Helm chart](https://github.com/nextcloud/all-in-one/tree/main/nextcloud-aio-helm-chart).
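For illustration only, the following is a minimal, hedged sketch of bringing up the full stack (and with it this container) via the manual-install compose file referenced above. It assumes Docker and the Docker Compose plugin are installed; the exact variables that must be configured beforehand are described in the manual-install folder and are not shown here.

```bash
# Sketch, not an official quick start: this container only works as part of
# the complete AIO stack defined in manual-install/latest.yml.
git clone --depth 1 https://github.com/nextcloud/all-in-one.git
cd all-in-one/manual-install
# Edit latest.yml (or provide the required environment variables such as the
# domain and passwords) as described in the manual-install documentation,
# then start the stack:
docker compose -f latest.yml up -d
```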
## Documentation
- [Nextcloud All-in-One Documentation](https://github.com/nextcloud/all-in-one#readme)
- [Nextcloud Documentation](https://docs.nextcloud.com/)
## Contributing
Contributions are welcome! Please follow the Nextcloud project's guidelines and submit pull requests or issues via the main repository.
## License
This folder and its contents are licensed under the [GNU AGPLv3](https://www.gnu.org/licenses/agpl-3.0.html), in line with the rest of Nextcloud All-in-One.

View file

@ -0,0 +1,13 @@
#!/bin/bash
if [ "$COLLABORA_ENABLED" != yes ]; then
# Basically sleep forever if Collabora is not enabled
sleep inf
fi
while ! nc -z "$NC_DOMAIN" 443; do
sleep 5
done
sleep 10
echo "Activating collabora config..."
php /var/www/html/occ richdocuments:activate-config
sleep inf

View file

@ -1,5 +0,0 @@
<?php
$CONFIG = array (
'one-click-instance' => true,
'one-click-instance.user-limit' => 100,
);

View file

@ -2,20 +2,14 @@
$CONFIG = array ( $CONFIG = array (
'apps_paths' => array ( 'apps_paths' => array (
0 => array ( 0 => array (
'path' => '/var/www/html/apps', 'path' => OC::$SERVERROOT.'/apps',
'url' => '/apps', 'url' => '/apps',
'writable' => false, 'writable' => false,
), ),
1 => array ( 1 => array (
'path' => '/var/www/html/custom_apps', 'path' => OC::$SERVERROOT.'/custom_apps',
'url' => '/custom_apps', 'url' => '/custom_apps',
'writable' => true, 'writable' => true,
), ),
), ),
); );
if (getenv('APPS_ALLOWLIST')) {
$CONFIG['appsallowlist'] = explode(" ", getenv('APPS_ALLOWLIST'));
}
if (getenv('NEXTCLOUD_APP_STORE_URL')) {
$CONFIG['appstoreurl'] = getenv('NEXTCLOUD_APP_STORE_URL');
}

View file

@ -1,5 +0,0 @@
<?php
// Check if NEXTCLOUD_TRUSTED_CERTIFICATES_ are configured
if (str_contains(implode(' ', array_keys(getenv())), 'NEXTCLOUD_TRUSTED_CERTIFICATES_')) {
$CONFIG['default_certificates_bundle_path'] = '/var/www/html/data/certificates/ca-bundle.crt';
}

View file

@ -1,17 +0,0 @@
<?php
if (getenv('NEXTCLOUD_TRUSTED_CERTIFICATES_POSTGRES')) {
$CONFIG = array(
'pgsql_ssl' => array(
'mode' => 'verify-ca',
'rootcert' => '/var/www/html/data/certificates/ca-bundle.crt',
),
);
}
if (getenv('NEXTCLOUD_TRUSTED_CERTIFICATES_MYSQL')) {
$CONFIG = array(
'dbdriveroptions' => array(
PDO::MYSQL_ATTR_SSL_CA => '/var/www/html/data/certificates/ca-bundle.crt',
),
);
}

View file

@ -1,13 +0,0 @@
<?php
if (getenv('HTTP_PROXY')) {
$CONFIG['proxy'] = getenv('HTTP_PROXY');
}
if (getenv('HTTPS_PROXY')) {
$CONFIG['proxy'] = getenv('HTTPS_PROXY');
}
if (getenv('PROXY_USER_PASSWORD')) {
$CONFIG['proxyuserpwd'] = getenv('PROXY_USER_PASSWORD');
}
if (getenv('NO_PROXY')) {
$CONFIG['proxyexclude'] = explode(',', getenv('NO_PROXY'));
}

View file

@ -9,15 +9,9 @@ if (getenv('REDIS_HOST')) {
), ),
); );
if (getenv('REDIS_PORT')) { if (getenv('REDIS_HOST_PORT') !== false) {
$CONFIG['redis']['port'] = (int) getenv('REDIS_PORT'); $CONFIG['redis']['port'] = (int) getenv('REDIS_HOST_PORT');
} } elseif (getenv('REDIS_HOST')[0] != '/') {
$CONFIG['redis']['port'] = 6379;
if (getenv('REDIS_DB_INDEX')) {
$CONFIG['redis']['dbindex'] = (int) getenv('REDIS_DB_INDEX');
}
if (getenv('REDIS_USER_AUTH') !== false) {
$CONFIG['redis']['user'] = str_replace("&auth[]=", "", getenv('REDIS_USER_AUTH'));
} }
} }

View file

@ -4,34 +4,24 @@ if (getenv('OBJECTSTORE_S3_BUCKET')) {
$use_path = getenv('OBJECTSTORE_S3_USEPATH_STYLE'); $use_path = getenv('OBJECTSTORE_S3_USEPATH_STYLE');
$use_legacyauth = getenv('OBJECTSTORE_S3_LEGACYAUTH'); $use_legacyauth = getenv('OBJECTSTORE_S3_LEGACYAUTH');
$autocreate = getenv('OBJECTSTORE_S3_AUTOCREATE'); $autocreate = getenv('OBJECTSTORE_S3_AUTOCREATE');
$multibucket = getenv('OBJECTSTORE_S3_MULTIBUCKET');
$CONFIG = array( $CONFIG = array(
'objectstore' => array( 'objectstore' => array(
'class' => '\OC\Files\ObjectStore\S3', 'class' => '\OC\Files\ObjectStore\S3',
'arguments' => array( 'arguments' => array(
'multibucket' => $multibucket === 'true',
'num_buckets' => (int)getenv('OBJECTSTORE_S3_NUM_BUCKETS') ?: 64,
'bucket' => getenv('OBJECTSTORE_S3_BUCKET'), 'bucket' => getenv('OBJECTSTORE_S3_BUCKET'),
'key' => getenv('OBJECTSTORE_S3_KEY') ?: '', 'key' => getenv('OBJECTSTORE_S3_KEY') ?: '',
'secret' => getenv('OBJECTSTORE_S3_SECRET') ?: '', 'secret' => getenv('OBJECTSTORE_S3_SECRET') ?: '',
'region' => getenv('OBJECTSTORE_S3_REGION') ?: '', 'region' => getenv('OBJECTSTORE_S3_REGION') ?: '',
'hostname' => getenv('OBJECTSTORE_S3_HOST') ?: '', 'hostname' => getenv('OBJECTSTORE_S3_HOST') ?: '',
'port' => getenv('OBJECTSTORE_S3_PORT') ?: '', 'port' => getenv('OBJECTSTORE_S3_PORT') ?: '',
'storageClass' => getenv('OBJECTSTORE_S3_STORAGE_CLASS') ?: '',
'objectPrefix' => getenv("OBJECTSTORE_S3_OBJECT_PREFIX") ? getenv("OBJECTSTORE_S3_OBJECT_PREFIX") : "urn:oid:", 'objectPrefix' => getenv("OBJECTSTORE_S3_OBJECT_PREFIX") ? getenv("OBJECTSTORE_S3_OBJECT_PREFIX") : "urn:oid:",
'autocreate' => strtolower($autocreate) !== 'false', 'autocreate' => (strtolower($autocreate) === 'false' || $autocreate == false) ? false : true,
'use_ssl' => strtolower($use_ssl) !== 'false', 'use_ssl' => (strtolower($use_ssl) === 'false' || $use_ssl == false) ? false : true,
// required for some non Amazon S3 implementations // required for some non Amazon S3 implementations
'use_path_style' => strtolower($use_path) === 'true', 'use_path_style' => $use_path == true && strtolower($use_path) !== 'false',
// required for older protocol versions // required for older protocol versions
'legacy_auth' => strtolower($use_legacyauth) === 'true', 'legacy_auth' => $use_legacyauth == true && strtolower($use_legacyauth) !== 'false'
'use_nextcloud_bundle' => 1,
) )
) )
); );
}
$sse_c_key = getenv('OBJECTSTORE_S3_SSE_C_KEY');
if ($sse_c_key) {
$CONFIG['objectstore']['arguments']['sse_c_key'] = $sse_c_key;
}
}

View file

@ -1,31 +0,0 @@
<?php
if (getenv('SMTP_HOST') && getenv('MAIL_FROM_ADDRESS') && getenv('MAIL_DOMAIN')) {
$CONFIG = array (
'mail_smtpmode' => 'smtp',
'mail_smtphost' => getenv('SMTP_HOST'),
'mail_smtpport' => getenv('SMTP_PORT') ?: (getenv('SMTP_SECURE') ? 465 : 25),
'mail_smtpsecure' => getenv('SMTP_SECURE') ?: '',
'mail_smtpauth' => getenv('SMTP_NAME') && getenv('SMTP_PASSWORD'),
'mail_smtpauthtype' => getenv('SMTP_AUTHTYPE') ?: 'LOGIN',
'mail_smtpname' => getenv('SMTP_NAME') ?: '',
'mail_from_address' => getenv('MAIL_FROM_ADDRESS'),
'mail_domain' => getenv('MAIL_DOMAIN'),
);
if (getenv('SMTP_PASSWORD')) {
$CONFIG['mail_smtppassword'] = getenv('SMTP_PASSWORD');
} else {
$CONFIG['mail_smtppassword'] = '';
}
}
if (getenv('NEXTCLOUD_TRUSTED_CERTIFICATES_MAILER')) {
$CONFIG = array(
'mail_smtpstreamoptions' => array(
'ssl' => array(
'verify_peer_name' => false,
'cafile' => '/var/www/html/data/certificates/ca-bundle.crt',
)
)
);
}

View file

@ -1,18 +1,7 @@
#!/bin/bash #!/bin/bash
wait_for_cron() { set -eu
set -x
while [ -n "$(pgrep -f /var/www/html/cron.php)" ]; do
echo "Waiting for cron to stop..."
sleep 5
done
echo "Cronjob successfully exited."
exit
}
trap wait_for_cron SIGINT SIGTERM
while true; do while true; do
php -f /var/www/html/cron.php & php -f /var/www/html/cron.php &
sleep 5m & sleep 5m
wait $!
done done

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff