
Overhaul (#4)

**README**
- Overhaul; updated the docs to align with the new `backup.sh`.
- Added an example `docker-compose` configuration.

**Dockerfile**
- Installed `zip`.
- Removed the `sqlite` package.
- Removed unnecessary environment variables.

**backup.sh**
- Overhaul; now zips all required and recommended files and directories listed in the [vaultwarden docs](https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault).
- Removed deletion of old backups; this will be re-added later in a separate script.

**entrypoint.sh**
- Removed folder creation.
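
For illustration, a one-off run of the overhauled image might look like this (a sketch only: the host paths are placeholders, the image name is taken from the README example below, and the `manual` argument is the one handled by `entrypoint.sh`):

```sh
# One-shot backup: zips the vault files from /data into /backups, then exits.
# Host paths are placeholders; adjust them to your setup.
docker run --rm \
  -v /path/to/vaultwarden/data:/data:ro \
  -v /path/to/backups:/backups \
  jmqm/vaultwarden_backup manual
```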
Commit 02c9d71114 (parent ee15635106)
Author: jmqm (committed via GitHub)
Date: 2021-05-25 20:34:39 -05:00
7 changed files with 54 additions and 369 deletions

GitHub Actions "Lock down repository" workflow (entire file deleted, 34 lines)

name: 'Lock down repository'

on:
  issues:
    types: opened
  pull_request:
    types: opened

jobs:
  lockdown:
    runs-on: ubuntu-latest
    steps:
      - uses: dessant/repo-lockdown@v2
        with:
          github-token: ${{ github.token }}
          issue-labels: 'off-topic'
          issue-comment: >
            Thanks for your contribution!
            However, this repository does not accept bug reports,
            since this is only a mirror of
            https://gitlab.com/1O/bitwarden_rs-backup.
            Please feel free to open the issue there.
          skip-closed-issue-comment: true
          pr-comment: >
            Thanks for your contribution!
            However, this repository does not accept pull requests,
            since this is only a mirror of
            https://gitlab.com/1O/bitwarden_rs-backup.
            Please feel free to open the pull request there.
          skip-closed-pr-comment: true

GitLab CI configuration (entire file deleted, 91 lines)

stages:
  - push:readme
  - build:docker

variables:
  DOCKERHUB_REGISTRY: index.docker.io
  DOCKERHUB_REPO: bw_backup
  IMAGE_NAME_DOCKERHUB: $DOCKERHUB_REGISTRY/$DOCKERHUB_USER/$DOCKERHUB_REPO
  # see https://gitlab.com/gitlab-org/gitlab-runner/issues/4501
  DOCKER_DRIVER: overlay2
  DOCKER_TLS_CERTDIR: "/certs"
  # See https://github.com/docker/buildx/releases
  BUILDX_VERSION: v0.5.1
  BUILDX_ARCH: linux-amd64

.docker_login: &docker_login
  docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY

.dockerhub_login: &dockerhub_login
  docker login -u "$DOCKERHUB_USER" -p "$DOCKERHUB_PASSWORD" $DOCKERHUB_REGISTRY

.docker_build_template: &docker_build
  stage: build:docker
  image: docker:latest
  services:
    - name: docker:dind
      command: ["--experimental"]
  tags:
    - shared
  before_script:
    - apk add curl
    - mkdir -p ~/.docker/cli-plugins
    - curl -sSLo ~/.docker/cli-plugins/docker-buildx https://github.com/docker/buildx/releases/download/$BUILDX_VERSION/buildx-$BUILDX_VERSION.$BUILDX_ARCH
    - chmod +x ~/.docker/cli-plugins/docker-buildx
    - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
    - docker context create my-context
    - docker buildx create --use my-context
    - docker info

push_readme:
  stage: push:readme
  image: docker:latest
  services:
    - docker:dind
  tags:
    - shared
  script:
    - docker run
      -v $(pwd)/README.md:/data/README.md:ro
      -e DOCKER_USER=$DOCKERHUB_USER
      -e DOCKER_PASS=$DOCKERHUB_PASSWORD
      -e PUSHRM_FILE=/data/README.md
      -e PUSHRM_TARGET=docker.io/$DOCKERHUB_USER/$DOCKERHUB_REPO
      chko/docker-pushrm
  rules:
    - if: $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "main"
      changes:
        - README.md

build_main:
  <<: *docker_build
  script:
    - *docker_login
    - *dockerhub_login
    #- docker build --pull -t "$IMAGE_NAME" -t "$IMAGE_NAME_DOCKERHUB:${CI_COMMIT_TAG:-latest}" .
    - docker buildx build
      --push
      --platform linux/arm/v7,linux/arm64/v8,linux/amd64
      --tag "$CI_REGISTRY_IMAGE:${CI_COMMIT_TAG:-latest}"
      --tag "$IMAGE_NAME_DOCKERHUB:${CI_COMMIT_TAG:-latest}" .
    #- docker push "$IMAGE_NAME"
    #- docker push "$IMAGE_NAME_DOCKERHUB:${CI_COMMIT_TAG:-latest}"
  rules:
    - if: '$CI_COMMIT_BRANCH == "main" || $CI_COMMIT_TAG'
      changes:
        - Dockerfile
        - backup.sh
        - entrypoint.sh

build:
  <<: *docker_build
  script:
    - *docker_login
    #- docker build --pull -t "$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG/bw_backup:${CI_COMMIT_TAG:-latest}" .
    - docker buildx build
      --push
      --platform linux/arm/v7,linux/arm64/v8,linux/amd64
      --tag "$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME" .
    #- docker push "$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG/bw_backup:${CI_COMMIT_TAG:-latest}"
  rules:
    - if: $CI_COMMIT_BRANCH != "main"

Dockerfile

@@ -4,18 +4,12 @@ FROM ${ARCH}alpine:latest
 RUN addgroup -S app && adduser -S -G app app
 RUN apk add --no-cache \
-    sqlite \
     busybox-suid \
     su-exec \
+    zip \
     tzdata
-ENV DB_FILE /data/db.sqlite3
-ENV BACKUP_FILE /data/db_backup/backup.sqlite3
-#ENV ATTACHMENT_BACKUP_FILE=/data/attachments_backup/attachments
-ENV ATTACHMENT_DIR=/data/attachments
-ENV BACKUP_FILE_PERMISSIONS 700
 ENV CRON_TIME "0 5 * * *"
-ENV TIMESTAMP false
 ENV UID 100
 ENV GID 100
 ENV CRONFILE /etc/crontabs/root

@@ -29,6 +23,5 @@ RUN mkdir /app/log/ \
   && chown -R app:app /app/ \
   && chmod -R 777 /app/ \
   && chmod +x /usr/local/bin/entrypoint.sh
-# && echo "\$CRON_TIME \$BACKUP_CMD >> \$LOGFILE 2>&1" | crontab -
 ENTRYPOINT ["entrypoint.sh"]
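
A quick way to sanity-check the slimmed-down image after these changes (the `vwdump:test` tag is just a placeholder; assumes you build from the repository root):

```sh
# Build the image, then confirm zip is present and sqlite3 is no longer installed.
docker build -t vwdump:test .
docker run --rm --entrypoint sh vwdump:test -c 'zip -v | head -n 1; which sqlite3 || echo "sqlite3 not installed"'
```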

README.md

@@ -1,92 +1,60 @@
-# bitwarden_rs Backup
-Docker Containers for [bitwarden_rs](https://github.com/dani-garcia/bitwarden_rs) Backup.
+Backs up vaultwarden files using cron daemon.
+Can be set to run automatically.
 ## Usage
-Since version v0.0.7 you can always use the `latest` tag, since the image is build with
-multi-arch support. Of course you can always use the version tags `vx.y.z` to stick
-to a specific version. Note however that there will be no security updates for the
-alpine base image if you stick to a version.
-Make sure that your **bitwarden_rs container is named `bitwarden`** otherwise
-you have to replace the container name in the `--volumes-from` section of the `docker run` call.
-### Automatic Backups
-A cron daemon is running inside the container and the container keeps running in background.
-Start backup container with default settings (automatic backup at 5 am)
-```sh
-docker run -d --restart=always --name bitwarden_backup --volumes-from=bitwarden jmqm/bitwarden_rs-backup
+#### Automatic Backups
+Refer to the `docker-compose` section below. By default, backing up is automatic.
+#### Manual Backups
+Pass `manual` to `docker run` or `docker-compose` as a `command`.
+## docker-compose
+```
+services:
+  vaultwarden:
+    # Vaultwarden configuration here.
+  backup:
+    image: jmqm/vaultwarden_backup
+    container_name: vaultwarden_backup
+    volumes:
+      - "/vaultwarden_data_directory:/data:ro"
+      - "/backup_directory:/backups"
+      - "/etc/localtime:/etc/localtime:ro" # Container uses date from host.
+    environment:
+      - DELETE_AFTER=30 #optional
+      - CRON_TIME=* */4 * * *
+      - UID=1024
+      - GID=100
 ```
-Example for backup including attachment folder (see [Environment variables section](#environment-variables) for more information)
-```sh
-docker run -d --restart=always --name bitwarden_backup --volumes-from=bitwarden -e ATTACHMENT_BACKUP_FILE=/data/attachments_backup/attachments jmqm/bitwarden_rs-backup
-```
-Example for backup including send folder (see [Environment variables section](#environment-variables) for more information)
-```sh
-docker run -d --restart=always --name bitwarden_backup --volumes-from=bitwarden -e SEND_BACKUP_FILE=/data/sends_backup/sends jmqm/bitwarden_rs-backup
-```
-Example for hourly backups
-```sh
-docker run -d --restart=always --name bitwarden_backup --volumes-from=bitwarden -e CRON_TIME="0 * * * *" jmqm/bitwarden_rs-backup
-```
-Example for backups that delete after 30 days
-```sh
-docker run -d --restart=always --name bitwarden_backup --volumes-from=bitwarden -e DELETE_AFTER=30 jmqm/bitwarden_rs-backup
-```
-### Manual Backups
-You can use the crontab of your host to schedule the backup and the container will only be running during the backup process.
-```sh
-docker run --rm --volumes-from=bitwarden jmqm/bitwarden_rs-backup manual
-```
-Keep in mind that the above command will be executed inside the container. So
-- `$DB_FILE` is the path to the bitwarden database which is normally locatated at `/data/db.sqlite3`
-- `$BACKUP_FILE` can be any place inside the container. Easiest would be to set it to `/data/backup.sqlite3` which will create the backup near the original database file.
-If you want the backed up file to be stored outside the container you have to mount
-a directory by adding `-v <PATH_ON_YOUR_HOST>:<PATH_INSIDE_CONTAINER>`. The complete command could look like this
-```sh
-docker run --rm --volumes-from=bitwarden -e UID=0 -e BACKUP_FILE=/myBackup/backup.sqlite3 -e TIMESTAMP=true -v /tmp/myBackup:/myBackup jmqm/bitwarden_rs-backup manual
-```
-## Environment variables
-| ENV | Description |
-| ----------------------- | -------------------------------------------------------------------------------------- |
-| DB_FILE | Path to the Bitwarden sqlite3 database *inside* the container |
-| BACKUP_FILE | Path to the desired backup location *inside* the container |
-| BACKUP_FILE_PERMISSIONS | Sets the permissions of the backup file (**CAUTION** [^1]) |
-| CRON_TIME | Cronjob format "Minute Hour Day_of_month Month_of_year Day_of_week Year" |
-| TIMESTAMP | Set to `true` to append timestamp to the `BACKUP_FILE` |
-| UID | User ID to run the cron job with |
-| GID | Group ID to run the cron job with |
-| LOGFILE | Path to the logfile *inside* the container |
-| CRONFILE | Path to the cron file *inside* the container |
-| DELETE_AFTER | Delete old backups after X many days |
-| TZ | Set the timezone inside the container [^2] |
-| ATTACHMENT_BACKUP_FILE | If present, the directory `ATTACHMENT_DIR` are backup in path `ATTACHMENT_BACKUP_FILE` |
-| ATTACHMENT_DIR | Path to the Bitwarden attachment folder *inside* the container |
-| SEND_BACKUP_FILE | If present, the directory `SEND_DIR` are backup in path `SEND_BACKUP_FILE` |
-| SEND_DIR | Path to the Bitwarden send folder *inside* the container |
-[^1]: The permissions should at least be 700 since the backup folder itself gets the same permissions and with 600 it would not be accessible.
-[^2]: see <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones> for more information
-## Common erros
-### Wrong permissions
+## Environment Variables
+#### ⭐Required, 👍 Recommended
+| Variable | Description |
+| -------------- | ------------------------------------------------------------------------------------------------------------------------------------- |
+| UID ⭐| User ID to run the cron job as. |
+| GID ⭐| Group ID to run the cron job as. |
+| CRON_TIME 👍| When to run. Info [here](https://www.ibm.com/docs/en/db2oc?topic=task-unix-cron-format) and generator [here](https://crontab.guru/) |
+| DELETE_AFTER 👍| Delete backups _X_ days old. _(unsupported at the moment)_ |
+#### Optional
+| Variable | Description |
+| -------------- | -------------------------------------------------------------------------------------------- |
+| TZ ¹ | Timezone inside the container. Can mount `/etc/localtime` instead as well _(recommended)_. |
+| LOGFILE | Log file path relative to inside the container. |
+| CRONFILE | Cron file path relative to inside the container. |
+¹ See <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones> for more information
+## Errors
+#### Wrong permissions
 `Error: unable to open database file` is most likely caused by permission errors.
 Note that sqlite3 creates a lock file in the source directory while running the backup.
 So source *AND* destination have to be +rw for the user. You can set the user and group ID
 via the `UID` and `GID` environment variables like described above.
-### Date Time issues / Wrong timestamp
+#### Date Time issues / Wrong timestamp
 If you need timestamps in your local timezone you should mount `/etc/timezone:/etc/timezone:ro` and `/etc/localtime:/etc/localtime:ro`
 like it's done in the [docker-compose.yml](docker-compose.yml). An other possible solution is to set the environment variable accordingly (like `TZ=Europe/Berlin`)
 (see <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones> for more information).
-**Attention** if you are on an ARM based platform please note that [alpine](https://alpinelinux.org/) is used as base image for this project to keep things small. Since alpine 3.13 and above it's possible that you will end up with a container with broken time and date settings (i.e. year 1900). This is a known problem in the alpine project (see [Github issue](https://github.com/alpinelinux/docker-alpine/issues/141) and [solution](https://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.13.0#time64_requirements)) and there is nothing I can do about it. However in the [alpine wiki](https://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.13.0#time64_requirements) a solution is being proposed which I also tested tested on my raspberry pi. After following the described process it started working again as expected. If you still experience issues or could for some reason not apply the aforementioned fixes please feel free to open an issue.
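
Since scheduling is now driven entirely by `CRON_TIME`, a few example values may help (standard five-field cron syntax; `0 5 * * *` is the default set in the Dockerfile):

```sh
# Example CRON_TIME values: minute hour day-of-month month day-of-week
#   CRON_TIME="0 5 * * *"    # daily at 05:00 (image default)
#   CRON_TIME="0 * * * *"    # hourly, on the hour
#   CRON_TIME="0 */4 * * *"  # every four hours
```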

backup.sh

@@ -1,75 +1,14 @@
 #!/bin/sh
-# Check if db file is accessible and exit otherwise
-if [ ! -e "$DB_FILE" ]
-then
-  echo "Database $DB_FILE not found!\nPlease check if you mounted the bitwarden_rs volume with '--volumes-from=bitwarden'"!
-  exit 1;
-fi
-# Check if ATTACHMENT_BACKUP_FILE exist. If it's true, attechment are backup. We define var with or without TIMESTAMP
-# In anycase, we define var LOCALVAR_ATTACHMENT_BACKUP_FILE to limit the complexity of code (the number of if-else)
-if [ ! -z $ATTACHMENT_BACKUP_FILE ]
-then
-  LOCALVAR_ATTACHMENT_BACKUP_FILE="$ATTACHMENT_BACKUP_FILE"
-else
-  LOCALVAR_ATTACHMENT_BACKUP_FILE=""
-fi
-# Check if SEND_BACKUP_FILE exist. If it's true, attechment are backup. We define var with or without TIMESTAMP
-# In anycase, we define var LOCALVAR_SEND_BACKUP_FILE to limit the complexity of code (the number of if-else)
-if [ ! -z $SEND_BACKUP_FILE ]
-then
-  LOCALVAR_SEND_BACKUP_FILE="$SEND_BACKUP_FILE"
-else
-  LOCALVAR_SEND_BACKUP_FILE=""
-fi
-if [ $TIMESTAMP = true ]
-then
-  FINAL_BACKUP_FILE="$(echo "$BACKUP_FILE")_$(date "+%F-%H%M%S")"
-  FINAL_BACKUP_ATTACHMENT="$(echo "$LOCALVAR_ATTACHMENT_BACKUP_FILE")_$(date "+%F-%H%M%S")"
-  FINAL_BACKUP_SEND="$(echo "$LOCALVAR_SEND_BACKUP_FILE")_$(date "+%F-%H%M%S")"
-else
-  FINAL_BACKUP_FILE=$BACKUP_FILE
-  FINAL_BACKUP_ATTACHMENT=$LOCALVAR_ATTACHMENT_BACKUP_FILE
-  FINAL_BACKUP_SEND=$LOCALVAR_SEND_BACKUP_FILE
-fi
-/usr/bin/sqlite3 $DB_FILE ".backup $FINAL_BACKUP_FILE"
-if [ $? -eq 0 ]
-then
-  echo "$(date "+%F %T") - Backup successfull to $FINAL_BACKUP_FILE"
-else
-  echo "$(date "+%F %T") - Backup unsuccessfull"
-fi
-if [ ! -z $ATTACHMENT_BACKUP_FILE ]
-then
-  echo "Create tar ${FINAL_BACKUP_ATTACHMENT}.tgz\n"
-  /bin/tar -czf ${FINAL_BACKUP_ATTACHMENT}.tgz ${ATTACHMENT_DIR}
-fi
-if [ ! -z $SEND_BACKUP_FILE ]
-then
-  echo "Create tar ${FINAL_BACKUP_SEND}.tgz\n"
-  /bin/tar -czf ${FINAL_BACKUP_SEND}.tgz ${SEND_DIR}
-fi
-if [ ! -z $DELETE_AFTER ] && [ $DELETE_AFTER -gt 0 ]
-then
-  find $(dirname "$BACKUP_FILE") -name "$(basename "$BACKUP_FILE")*" -type f -mtime +$DELETE_AFTER -exec rm -f {} \; -exec echo "Deleted {} after $DELETE_AFTER days" \;
-  if [ ! -z $ATTACHMENT_BACKUP_FILE ]
-  then
-    find $(dirname "$FINAL_BACKUP_ATTACHMENT") -name "$(basename "$FINAL_BACKUP_ATTACHMENT")*" -type f -mtime +$DELETE_AFTER -exec rm -f {} \; -exec echo "Deleted {} after $DELETE_AFTER days" \;
-  fi
-  if [ ! -z $SEND_BACKUP_FILE ]
-  then
-    find $(dirname "$FINAL_BACKUP_SEND") -name "$(basename "$FINAL_BACKUP_SEND")*" -type f -mtime +$DELETE_AFTER -exec rm -f {} \; -exec echo "Deleted {} after $DELETE_AFTER days" \;
-  fi
-fi
+# Create variable for new backup zip.
+BACKUP_ZIP=/backups/$(date "+%F_%H.%M.%S").zip
+
+# Create variables for the files and directories to be zipped.
+BACKUP_DB=db.sqlite3 # file
+BACKUP_RSA=rsa_key* # files
+BACKUP_CONFIG=config.json # file
+BACKUP_ATTACHMENTS=attachments # directory
+BACKUP_SENDS=sends # directory
+
+# Create a zip of the files and directories.
+cd /data && zip -r $BACKUP_ZIP $BACKUP_DB $BACKUP_RSA $BACKUP_CONFIG $BACKUP_ATTACHMENTS $BACKUP_SENDS && cd ..
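
The new script only creates archives; restoring is outside the scope of this commit. On a host with `unzip` available, a restore could look roughly like this (container name, paths, and archive name are placeholders; stop vaultwarden before touching `/data`):

```sh
# Hypothetical restore of a backup produced by the new backup.sh.
docker stop vaultwarden
unzip /path/to/backups/2021-05-25_05.00.00.zip -d /path/to/vaultwarden/data
docker start vaultwarden
```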

docker-compose.yml (entire file deleted, 58 lines)

---
version: '3.7'

services:
  # this will build the image - usually not needed
  # bw_test:
  #   build: ./
  #   container_name: bw_test
  #   ports:
  #     - 8003:80
  #   volumes:
  #     - bitwarden:/data/
  #     - ./test:/backup_folder/
  #   restart: on-failure
  #   init: true
  #   environment:
  #     - DB_FILE=/data/db.sqlite3
  #     # uncomment this if you want your backup to be written to ./backup/ folder"
  #     - BACKUP_FILE=/backup_folder/db_backup/backup.sqlite3
  #     - CRON_TIME=*/1 * * * *
  #     - TIMESTAMP=false
  #     - UID=1002
  #     - GID=1002
  #
  bitwarden:
    image: bitwardenrs/server
    container_name: bitwarden
    ports:
      - 8002:80
    volumes:
      - bitwarden:/data/
    restart: on-failure

  bw_backup:
    image: bruceforce/bw_backup
    container_name: bw_backup
    restart: on-failure
    init: true
    depends_on:
      - bitwarden
    volumes:
      - bitwarden:/data/
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # uncomment this if you want your backup to be written to ./backup/ folder"
      # - ./backup:/backup_folder/
    environment:
      - DB_FILE=/data/db.sqlite3
      # uncomment this if you want your backup to be written to ./backup/ folder"
      # - BACKUP_FILE=/backup_folder/db_backup/backup.sqlite3
      - BACKUP_FILE=/data/db_backup/backup.sqlite3
      - CRON_TIME=0 5 * * *
      - TIMESTAMP=false
      - UID=0
      - GID=0
      - BACKUP_FILE_PERMISSIONS=700

volumes:
  bitwarden:

entrypoint.sh

@@ -1,41 +1,10 @@
 #!/bin/sh
-# vim: tabstop=2 shiftwidth=2 expandtab
-#set -x
 BACKUP_CMD="/sbin/su-exec ${UID}:${GID} /app/backup.sh"
 echo "Running $(basename "$0") as $(id)"
-# Preparation
-BACKUP_DIR=$(dirname "$BACKUP_FILE")
-if [ ! -d "$BACKUP_DIR" ]
-then
-  echo "$BACKUP_DIR not exists. Creating it with owner $UID:$GID and permissions $BACKUP_FILE_PERMISSIONS."
-  install -o $UID -g $GID -m $BACKUP_FILE_PERMISSIONS -d $BACKUP_DIR
-fi
-ATTACHMENT_BACKUP_DIR=$(dirname "$ATTACHMENT_BACKUP_FILE")
-if [ ! -d "$ATTACHMENT_BACKUP_DIR" ]
-then
-  echo "$ATTACHMENT_BACKUP_DIR not exists. Creating it with owner $UID:$GID and permissions $BACKUP_FILE_PERMISSIONS."
-  install -o $UID -g $GID -m $BACKUP_FILE_PERMISSIONS -d $ATTACHMENT_BACKUP_DIR
-fi
-SEND_BACKUP_DIR=$(dirname "$SEND_BACKUP_FILE")
-if [ ! -d "$SEND_BACKUP_DIR" ]
-then
-  echo "$SEND_BACKUP_DIR not exists. Creating it with owner $UID:$GID and permissions $BACKUP_FILE_PERMISSIONS."
-  install -o $UID -g $GID -m $BACKUP_FILE_PERMISSIONS -d $SEND_BACKUP_DIR
-fi
-# For compatibility reasons
-if [ "$1" = "/backup.sh" ]; then
-  >&2 echo "Using /backup.sh is deprecated and will be removed in future versions! Please use \`manual\` as argument instead"
-  $BACKUP_CMD
-fi
-# Just run the backup script
+# Run backup script once ($1 = First argument passed).
 if [ "$1" = "manual" ]; then
   $BACKUP_CMD
   exit 0

@@ -46,7 +15,6 @@ if [ "$(id -u)" -eq 0 ] && [ "$(grep -c "$BACKUP_CMD" "$CRONFILE")" -eq 0 ]; then
   echo "Initalizing..."
   echo "Writing backup command \"$BACKUP_CMD\" to cron."
   echo "$CRON_TIME $BACKUP_CMD >> $LOGFILE 2>&1" | crontab -
 fi
 # Start crond if it's not running
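
For reference, with the Dockerfile defaults (`CRON_TIME="0 5 * * *"`, `UID=100`, `GID=100`) the line written to the crontab expands to roughly the following; the `LOGFILE` default is not visible in this diff, so the log path shown is only illustrative:

```sh
# Approximate crontab entry generated by entrypoint.sh (log path is illustrative).
0 5 * * * /sbin/su-exec 100:100 /app/backup.sh >> /app/log/backup.log 2>&1
```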