From 4902410a6e47713a19ff5c99126bc15ad386d542 Mon Sep 17 00:00:00 2001 From: jyotipm29 Date: Wed, 30 Oct 2024 12:23:59 +0100 Subject: [PATCH] bump to 24.1 --- .github/workflows/lint.yml | 10 +- .github/workflows/single.sh | 9 +- .github/workflows/single_container.yml | 8 +- .gitmodules | 5 +- .travis.yml | 13 +- README.md | 125 +++++-- docs/Running_jobs_outside_of_the_container.md | 4 +- galaxy/Dockerfile | 148 +++++--- galaxy/install_biojs_vis.sh | 4 +- galaxy/install_tools_wrapper.sh | 4 +- galaxy/postgresql_provision.yml | 2 +- galaxy/provision.yml | 2 +- galaxy/reports.yml.sample | 39 ++ galaxy/reports_wsgi.ini.sample | 100 ----- galaxy/roles/galaxy-postgresql | 2 +- galaxy/run.sh | 50 +-- galaxy/setup_postgresql.py | 35 +- galaxy/startup.sh | 351 ++++++++++++------ galaxy/tools_conf_interactive.xml.sample | 19 + galaxy/welcome.html | 12 +- test/bioblend/Dockerfile | 31 +- test/bioblend/test.sh | 11 +- test/gridengine/test_outputhostname.py | 10 +- test/slurm/Dockerfile | 7 +- test/slurm/configure_slurm.py | 13 +- test/slurm/job_conf.xml | 3 + test/slurm/startup.sh | 6 +- test/slurm/supervisor_slurm.conf | 4 +- 28 files changed, 578 insertions(+), 449 deletions(-) create mode 100644 galaxy/reports.yml.sample delete mode 100644 galaxy/reports_wsgi.ini.sample create mode 100644 galaxy/tools_conf_interactive.xml.sample diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 36692201a..e33397ff8 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -5,18 +5,18 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 - - name: Cleanup to only use compose - run: rm -R docs galaxy test + uses: actions/checkout@v4 + # - name: Cleanup to only use compose + # run: rm -R docs galaxy test - name: Run shellcheck with reviewdog - uses: reviewdog/action-shellcheck@v1.1.3 + uses: reviewdog/action-shellcheck@v1.27.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} reporter: github-check level: warning pattern: "*.sh" - name: Run hadolint with reviewdog - uses: reviewdog/action-hadolint@v1.16.0 + uses: reviewdog/action-hadolint@v1.46.0 with: github_token: ${{ secrets.GITHUB_TOKEN }} reporter: github-check diff --git a/.github/workflows/single.sh b/.github/workflows/single.sh index 446d867ae..5d1e309ca 100755 --- a/.github/workflows/single.sh +++ b/.github/workflows/single.sh @@ -19,6 +19,7 @@ docker info # start building this repo git submodule update --init --recursive +git submodule update --remote --rebase sudo chown 1450 /tmp && sudo chmod a=rwx /tmp ## define a container size check function, first parameter is the container name, second the max allowed size in MB @@ -50,8 +51,8 @@ docker run -d -p 8080:80 -p 8021:21 -p 8022:22 \ --privileged=true \ -v "$(pwd)/local_folder:/export/" \ -e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \ - -e GALAXY_CONFIG_ALLOW_LIBRARY_PATH_PASTE=True \ - -e GALAXY_CONFIG_ENABLE_USER_DELETION=True \ + -e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \ + -e GALAXY_CONFIG_ALLOW_USER_DELETION=True \ -e GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \ -v /tmp/:/tmp/ \ quay.io/bgruening/galaxy @@ -82,7 +83,7 @@ cd "${WORKING_DIR}/test/slurm/" && bash test.sh && cd "$WORKING_DIR" # - cd $WORKING_DIR/test/gridengine/ && bash test.sh && cd $WORKING_DIR echo 'Waiting for Galaxy to come up.' -galaxy-wait -g $BIOBLEND_GALAXY_URL --timeout 300 +galaxy-wait -g $BIOBLEND_GALAXY_URL --timeout 600 curl -v --fail $BIOBLEND_GALAXY_URL/api/version @@ -124,7 +125,7 @@ cd "$WORKING_DIR/test/bioblend/" && . 
./test.sh && cd "$WORKING_DIR/" # then # # Test without install-repository wrapper # sleep 10 -# docker_exec_run bash -c 'cd $GALAXY_ROOT && python ./scripts/api/install_tool_shed_repositories.py --api admin -l http://localhost:80 --url https://toolshed.g2.bx.psu.edu -o devteam --name cut_columns --panel-section-name BEDTools' +# docker_exec_run bash -c 'cd $GALAXY_ROOT_DIR && python ./scripts/api/install_tool_shed_repositories.py --api admin -l http://localhost:80 --url https://toolshed.g2.bx.psu.edu -o devteam --name cut_columns --panel-section-name BEDTools' # fi diff --git a/.github/workflows/single_container.yml b/.github/workflows/single_container.yml index 31223bc18..b8ceb6664 100644 --- a/.github/workflows/single_container.yml +++ b/.github/workflows/single_container.yml @@ -1,15 +1,15 @@ name: Single Container Test -on: [push] +on: [push, pull_request] jobs: build_and_test: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.7] + python-version: ['3.10'] steps: - name: Checkout - uses: actions/checkout@v2 - - uses: actions/setup-python@v1 + uses: actions/checkout@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Build and Test diff --git a/.gitmodules b/.gitmodules index fa5eda484..0277eb137 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,7 +1,8 @@ [submodule "galaxy/roles/galaxyproject.galaxyextras"] path = galaxy/roles/galaxyprojectdotorg.galaxyextras - url = https://github.com/galaxyproject/ansible-galaxy-extras - branch = 20.05 + url = https://github.com/jyotipm29/ansible-galaxy-extras + branch = 24.1 + [submodule "galaxy/roles/galaxy-postgresql"] path = galaxy/roles/galaxy-postgresql url = https://github.com/galaxyproject/ansible-postgresql diff --git a/.travis.yml b/.travis.yml index 4353fad27..9f4b60ff6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,14 +1,14 @@ sudo: required language: python -python: 3.6 +python: 3.10 services: - docker env: matrix: - - TOX_ENV=py36 + - TOX_ENV=py310 global: - secure: "SEjcKJQ0NGXdpFxFhLVlyJmiBvgiLtR5Uufg90Vm3owKlMy0NSfIrOR+2dwNniqOp7QI3eVepnqjid/Ka0QStzVqMCe55OLkJ/TbTHnMLpbtY63mpGfogVRvxMMAVpzLpcQqtJFORZmO/MIWSLlBiXMMzOg3+tbXvQXmL17Rbmw=" @@ -39,6 +39,7 @@ before_install: # start building this repo - git submodule update --init --recursive + - git submodule update --remote --rebase - sudo chown 1450 /tmp && sudo chmod a=rwx /tmp - export WORKING_DIR="$TRAVIS_BUILD_DIR" - export DOCKER_RUN_CONTAINER="quay.io/bgruening/galaxy" @@ -68,8 +69,8 @@ before_install: --privileged=true \ -v `pwd`/local_folder:/export/ \ -e GALAXY_CONFIG_ALLOW_USER_DATASET_PURGE=True \ - -e GALAXY_CONFIG_ALLOW_LIBRARY_PATH_PASTE=True \ - -e GALAXY_CONFIG_ENABLE_USER_DELETION=True \ + -e GALAXY_CONFIG_ALLOW_PATH_PASTE=True \ + -e GALAXY_CONFIG_ALLOW_USER_DELETION=True \ -e GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES=True \ -v /tmp/:/tmp/ \ quay.io/bgruening/galaxy @@ -101,7 +102,7 @@ script: # - cd $TRAVIS_BUILD_DIR/test/gridengine/ && bash test.sh && cd $WORKING_DIR - echo 'Waiting for Galaxy to come up.' 
- - galaxy-wait -g $BIOBLEND_GALAXY_URL --timeout 300 + - galaxy-wait -g $BIOBLEND_GALAXY_URL --timeout 600 - curl -v --fail $BIOBLEND_GALAXY_URL/api/version @@ -140,7 +141,7 @@ script: # then # # Test without install-repository wrapper # sleep 10 - # docker_exec_run bash -c 'cd $GALAXY_ROOT && python ./scripts/api/install_tool_shed_repositories.py --api admin -l http://localhost:80 --url https://toolshed.g2.bx.psu.edu -o devteam --name cut_columns --panel-section-name BEDTools' + # docker_exec_run bash -c 'cd $GALAXY_ROOT_DIR && python ./scripts/api/install_tool_shed_repositories.py --api admin -l http://localhost:80 --url https://toolshed.g2.bx.psu.edu -o devteam --name cut_columns --panel-section-name BEDTools' # fi diff --git a/README.md b/README.md index 900fcd220..9eb1f093d 100644 --- a/README.md +++ b/README.md @@ -39,10 +39,12 @@ In short, with 19.05: - [Usage](#Usage) - [Upgrading images](#Upgrading-images) - [PostgreSQL migration](#Postgresql-migration) - - [Enabling Interactive Environments in Galaxy](#Enabling-Interactive-Environments-in-Galaxy) + - [Enabling Interactive Tools in Galaxy](#Enabling-Interactive-Tools-in-Galaxy) - [Using passive mode FTP or SFTP](#Using-passive-mode-FTP-or-SFTP) - [Using Parent docker](#Using-Parent-docker) - [Galaxy Report Webapp](#Galaxy-Report-Webapp) + - [RabbitMQ Management](#RabbitMQ-Management) + - [Flower Webapp](#Flower-Webapp) - [Galaxy's config settings](#Galaxys-config-settings) - [Configuring Galaxy's behind a proxy](#Galaxy-behind-proxy) - [On-demand reference data with CVMFS](#cvmfs) @@ -181,14 +183,14 @@ for f in *; do echo $f; diff $f ../galaxy-central/config/$f; read; done ```sh docker exec -it bash -supervisorctl stop galaxy: +galaxyctl stop sh manage_db.sh upgrade exit ``` 5. Restart Galaxy ```sh -docker exec -it supervisorctl start galaxy: +docker exec -it galaxyctl start ``` (Alternatively, restart the whole container) @@ -283,7 +285,7 @@ You can do it the following way (based on the "The quick upgrade method" above): 1. Stop Galaxy in the old container ```sh -docker exec -it supervisorctl stop galaxy: +docker exec -it galaxyctl stop ``` 2. Dump the old database @@ -315,7 +317,7 @@ Wait for the startup process to finish (Galaxy should be accessible) ```sh docker exec -it bash -supervisorctl stop galaxy: +galaxyctl stop su postgres psql -f /export/postgresql/9.3dump.sql postgres exit @@ -333,7 +335,7 @@ for f in *; do echo $f; diff $f ../galaxy-central/config/$f; read; done ```sh docker exec -it bash -supervisorctl stop galaxy: +galaxyctl stop sh manage_db.sh upgrade exit ``` @@ -341,7 +343,7 @@ exit 5. Restart Galaxy (= step 5 of the "The quick upgrade method" above) ```sh -docker exec -it supervisorctl start galaxy: +docker exec -it galaxyctl start ``` (Alternatively, restart the whole container) @@ -350,20 +352,33 @@ docker exec -it supervisorctl start galaxy: If you are *very* sure that everything went well, you can delete `/export/postgresql/9.3dump.sql` and `/export/postgresql/9.3/` to save some space. -## Enabling Interactive Environments in Galaxy [[toc]](#toc) +## Enabling Interactive Tools in Galaxy [[toc]](#toc) -Interactive Environments (IE) are sophisticated ways to extend Galaxy with powerful services, like [Jupyter](http://jupyter.org/), in a secure and reproducible way. +Interactive Tools (IT) are sophisticated ways to extend Galaxy with powerful services, like [Jupyter](http://jupyter.org/), in a secure and reproducible way. 
For this we need to be able to launch Docker containers inside our Galaxy Docker container. At least docker 1.3 is needed on the host system. ```sh -docker run -d -p 8080:80 -p 8021:21 -p 8800:8800 \ +docker run -d -p 8080:80 -p 8021:21 \ --privileged=true \ -v /home/user/galaxy_storage/:/export/ \ bgruening/galaxy-stable ``` -The port 8800 is the proxy port that is used to handle Interactive Environments. `--privileged` is needed to start docker containers inside docker. If your IE does not open, please make sure you open your Galaxy instance with your hostname or a [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name), but not with localhost or 127.0.0.1. +The port 4002 is the proxy port that is used to handle Interactive Tools. `--privileged` is needed to start docker containers inside docker. + +Additionally, you can set the `GALAXY_DOMAIN` environment variable to specify the domain name for your Galaxy instance to ensure that domain-based ITs work correctly. By default, it is set to `localhost`. If you have your own domain, you can specify it instead. + +If you're using the default job configuration, set the `GALAXY_DESTINATIONS_DEFAULT` environment variable to a Docker-enabled destination. By default, this is set to `slurm_cluster`, so you'll need to update it accordingly. Alternatively, you can also provide your own job configuration file. + +```sh +docker run -d -p 8080:80 -p 8021:21 \ + --privileged=true \ + -v /home/user/galaxy_storage/:/export/ \ + -e "GALAXY_DOMAIN=your.domain.com" \ + -e "GALAXY_DESTINATIONS_DEFAULT=slurm_cluster_docker" \ + bgruening/galaxy-stable +``` ## Using passive mode FTP or SFTP [[toc]](#toc) @@ -399,7 +414,7 @@ sftp -v -P 8022 -o User=admin@galaxy.org localhost <<< $'put ' On some linux distributions, Docker-In-Docker can run into issues (such as running out of loopback interfaces). If this is an issue, you can use a 'legacy' mode that use a docker socket for the parent docker installation mounted inside the container. To engage, set the environmental variable `DOCKER_PARENT` ```sh -docker run -p 8080:80 -p 8021:21 -p 8800:8800 \ +docker run -p 8080:80 -p 8021:21 \ --privileged=true -e DOCKER_PARENT=True \ -v /var/run/docker.sock:/var/run/docker.sock \ -v /home/user/galaxy_storage/:/export/ \ @@ -408,7 +423,7 @@ docker run -p 8080:80 -p 8021:21 -p 8800:8800 \ ## Galaxy Report Webapp [[toc]](#toc) -For admins wishing to have more information on the status of a galaxy instance, the Galaxy Report Webapp is served on `http://localhost:8080/reports`. As default this site is password protected with `admin:admin`. You can change this by providing a `reports_htpasswd` file in `/home/user/galaxy_storage/`. +For admins wishing to have more information on the status of a galaxy instance, the Galaxy Report Webapp is served on `http://localhost:8080/reports`. As default this site is password protected with `admin:admin`. You can change this by providing a `common_htpasswd` file in `/home/user/galaxy_storage/`. You can disable the Report Webapp entirely by providing the environment variable `NONUSE` during container startup. @@ -418,9 +433,33 @@ docker run -p 8080:80 \ bgruening/galaxy-stable ``` +## RabbitMQ Management [[toc]](#toc) + +RabbitMQ is used as the broker for services like Celery. RabbitMQ provides a dedicated web interface for managing message queues, accessible at `http://localhost:8080/rabbitmq/`. This interface allows you to monitor queues, exchanges, bindings, and more. 
By default, it is password protected with `admin:admin`, but the credentials can be changed after logging in. + +To completely disable RabbitMQ, you can set the `NONUSE` environment variable during container startup. + +```sh +docker run -p 8080:80 \ + -e "NONUSE=rabbitmq" \ + bgruening/galaxy-stable +``` + +## Flower Webapp [[toc]](#toc) + +Flower is a web-based tool for monitoring and administering Celery. It is accessible at `http://localhost:8080/flower`. By default, this site is password protected with `admin:admin`. You can change this by providing a `common_htpasswd` file in `/home/user/galaxy_storage/`. + +The Flower Webapp will only be available if both Celery and RabbitMQ are enabled, meaning the environment variable `NONUSE` does not include `celery` and `rabbitmq`. To completely disable the Flower Webapp, you can set the `NONUSE` environment variable during container startup. + +```sh +docker run -p 8080:80 \ + -e "NONUSE=flower" \ + bgruening/galaxy-stable +``` + ## Galaxy's config settings [[toc]](#toc) -Every Galaxy configuration parameter in `config/galaxy.ini` can be overwritten by passing an environment variable to the `docker run` command during startup. The name of the environment variable has to be: +Every Galaxy configuration parameter in `config/galaxy.yml` can be overwritten by passing an environment variable to the `docker run` command during startup. The name of the environment variable has to be: `GALAXY_CONFIG`+ *the_original_parameter_name_in_capital_letters* For example, you can set the Galaxy session timeout to 5 minutes and set your own Galaxy brand by invoking the `docker run` like this: @@ -431,7 +470,7 @@ docker run -p 8080:80 \ bgruening/galaxy-stable ``` -Note, that if you would like to run any of the [cleanup scripts](https://wiki.galaxyproject.org/Admin/Config/Performance/Purge%20Histories%20and%20Datasets), you will need to add the following to `/export/galaxy-central/config/galaxy.yml`: +Note, that if you would like to run any of the [cleanup scripts](https://galaxyproject.org/admin/config/performance/purge-histories-and-datasets/), you will need to add the following to `/export/galaxy-central/config/galaxy.yml`: ``` database_connection = postgresql://galaxy:galaxy@localhost:5432/galaxy @@ -440,11 +479,11 @@ file_path = /export/galaxy-central/database/files ## Security Configuration -*By default* the `admin_users` and `master_api_key` variables are set to: +*By default* the `admin_users` and `bootstrap_admin_api_key` variables are set to: ``` admin_users: admin@galaxy.org -master_api_key: HSNiugRFvgT574F43jZ7N9F3 +bootstrap_admin_api_key: HSNiugRFvgT574F43jZ7N9F3 ``` Additionally Galaxy encodes various internal values that can be part of output using secret string configurable as `id_secret` in the config file (use 5-65 bytes long string). This prevents 'guessing' of Galaxy's internal database sequences. Example: @@ -474,10 +513,9 @@ docker run -p 8080:80 \ ``` ## On-demand reference data with CVMFS [[toc]](#toc) -By default, Galaxy instances launched with this image will have on-demand access to approximately 3TB of +By default, Galaxy instances launched with this image will have on-demand access to approximately 4TB of reference genomes and indexes. These are the same reference data available on the main Galaxy server. -This is achieved by connecting to Galaxy's CernVM filesystem (CVMFS) at `data.galaxyproject.org` repository, -which is geographically distributed among numerous servers. 
+This is achieved by connecting to Galaxy's CernVM filesystem (CVMFS) at `cvmfs-config.galaxyproject.org` repository, which provides automatic configuration for all galaxyproject.org CVMFS repositories, including `data.galaxyproject.org`, and ensures they remain up to date. The CVMFS capability doesn't add to the size of the Docker image, but when running, CVMFS maintains a cache to keep the most recently used data on the local disk. @@ -491,11 +529,11 @@ The Galaxy welcome screen can be changed by providing a `welcome.html` page in ` ## Deactivating services [[toc]](#toc) -Non-essential services can be deactivated during startup. Set the environment variable `NONUSE` to a comma separated list of services. Currently, `nodejs`, `postgres`, `proftp`, `reports`, `slurmd` and `slurmctld` are supported. +Non-essential services can be deactivated during startup. Set the environment variable `NONUSE` to a comma separated list of services. Currently, `postgres`, `cron`, `proftp`, `reports`, `nodejs`, `condor`, `slurmd`, `slurmctld`, `celery`, `rabbitmq`, `redis`, `flower` and `tusd` are supported. ```sh docker run -d -p 8080:80 -p 9002:9002 \ - -e "NONUSE=nodejs,proftp,reports,slurmd,slurmctld" \ + -e "NONUSE=cron,proftp,reports,nodejs,condor,slurmd,slurmctld,celery,rabbitmq,redis,flower,tusd" \ bgruening/galaxy-stable ``` @@ -507,7 +545,7 @@ A graphical user interface, to start and stop your services, is available on por If you want to restart Galaxy without restarting the entire Galaxy container you can use `docker exec` (docker > 1.3). ```sh -docker exec supervisorctl restart galaxy: +docker exec galaxyctl restart ``` In addition you can start/stop every supervisord process using a web interface on port `9002`. Start your container with: @@ -575,9 +613,8 @@ docker run -d -p 8080:80 -p 8021:21 \ -e GALAXY_CONFIG_TOOL_DATA_PATH="/cluster_storage/galaxy/galaxy_export/galaxy-central/tool-data" \ -e GALAXY_CONFIG_SHED_TOOL_DATA_PATH="/cluster_storage/galaxy/galaxy_export/galaxy-central/tool-data" \ # The following settings are for directories that can be anywhere on the cluster fs. -GALAXY_CONFIG_JOB_WORKING_DIRECTORY="/cluster_storage/galaxy/galaxy_export/galaxy-central/database/job_working_directory" \ #IMPORTANT: needs to be created manually. Can also be placed elsewhere, but is originally located here +-e GALAXY_CONFIG_JOB_WORKING_DIRECTORY="/cluster_storage/galaxy/galaxy_export/galaxy-central/database/job_working_directory" \ #IMPORTANT: needs to be created manually. Can also be placed elsewhere, but is originally located here -e GALAXY_CONFIG_NEW_FILE_PATH="/cluster_storage/galaxy/tmp" \ # IMPORTANT: needs to be created manually. This needs to be writable by UID=1450 and have its flippy bit set (chmod 1777 for world-writable with flippy bit) --e GALAXY_CONFIG_CLUSTER_FILES_DIRECTORY="/cluster_storage/galaxy/job_scripts" \ # Job scripts and stdout and stderr will be written here. -e GALAXY_CONFIG_OUTPUTS_TO_WORKING_DIRECTORY=False \ # Writes Job scripts, stdout and stderr to job_working_directory. -e GALAXY_CONFIG_RETRY_JOB_OUTPUT_COLLECTION=5 \ #IF your cluster fs uses nfs this may introduce latency. You can set galaxy to retry if a job output is not yet created. # Conda settings. IMPORTANT! @@ -682,7 +719,7 @@ for most Galaxy instances. # Enable Galaxy to use BioContainers (Docker) [[toc]](#toc) This is a very cool feature where Galaxy automatically detects that your tool has an associated docker image, pulls it and runs it for you. 
These images (when available) have been generated using [mulled](https://github.com/mulled). To test, install the [IUC bedtools](https://toolshed.g2.bx.psu.edu/repository?repository_id=8d84903cc667dbe7&changeset_revision=7b3aaff0d78c) from the toolshed. When you try to execute *ClusterBed* for example. You may get a missing dependancy error for *bedtools*. But bedtools has an associated docker image on [quay.io](https://quay.io/). Now configure Galaxy as follows: -- Add this environment variable to `docker run`: `-e GALAXY_CONFIG_ENABLE_BETA_MULLED_CONTAINERS=True` +- Add this environment variable to `docker run`: `-e GALAXY_CONFIG_ENABLE_MULLED_CONTAINERS=True` - In `job_conf.xml` configure a Docker enabled destination as follows: ```xml @@ -702,9 +739,9 @@ When you execute the tool again, Galaxy will pull the image from Biocontainers ( | `ENABLE_TTS_INSTALL` | Enables the Test Tool Shed during container startup. This change is not persistent. (`ENABLE_TTS_INSTALL=True`) | | `GALAXY_LOGGING` | Enables for verbose logging at Docker stdout. (`GALAXY_LOGGING=full`) | | `BARE` | Disables all default Galaxy tools. (`BARE=True`) | -| `NONUSE` | Disable services during container startup. (`NONUSE=nodejs,proftp,reports,slurmd,slurmctld`) | -| `UWSGI_PROCESSES` | Set the number of uwsgi processes (`UWSGI_PROCESSES=2) | -| `UWSGI_THREADS` | Set the number of uwsgi threads (`UWSGI_THREADS=4`) | +| `NONUSE` | Disable services during container startup. (`NONUSE=cron,proftp,reports,nodejs,condor,slurmd,slurmctld,celery,rabbitmq,redis,flower,tusd`) | +| `GUNICORN_WORKERS` | Set the number of gunicorn workers (`GUNICORN_WORKERS=2`) | +| `CELERY_WORKERS` | Set the number of celery workers (`CELERY_WORKERS=2`) | | `GALAXY_DOCKER_ENABLED` | Enable Galaxy to use Docker containers if annotated in tools (`GALAXY_DOCKER_ENABLED=False`) | | `GALAXY_DOCKER_VOLUMES` | Specify volumes that should be mounted into tool containers (`GALAXY_DOCKER_VOLUMES=""`) | | `GALAXY_HANDLER_NUMPROCS` | Set the number of Galaxy handler (`GALAXY_HANDLER_NUMPROCS=2`) | @@ -721,13 +758,13 @@ Letsencrypt with the following environment variables: | Name | Description | |---|---| | `USE_HTTPS` | Set `USE_HTTPS=True` to set up HTTPS via self-signed certificates. If you have your own certificates, copy them to `/export/{server.key,server.crt}`. | -| `USE_HTTPS_LETSENCRYPT` | Set `USE_HTTPS_LETSENCRYPT=True` to automatically set up HTTPS using Letsencrypt as a certificate authority. (Requires you to also set `GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL`) Note: only set one of `USE_HTTPS` and `USE_HTTPS_LETSENCRYPT` to true. | -| `GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL` | Set `GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=` so that Letsencrypt can test your that you own the domain you claim to own in order to issue you your HTTPS cert. | +| `USE_HTTPS_LETSENCRYPT` | Set `USE_HTTPS_LETSENCRYPT=True` to automatically set up HTTPS using Letsencrypt as a certificate authority. (Requires you to also set `GALAXY_DOMAIN`) Note: only set one of `USE_HTTPS` and `USE_HTTPS_LETSENCRYPT` to true. | +| `GALAXY_DOMAIN` | Set `GALAXY_DOMAIN=` so that Letsencrypt can test your that you own the domain you claim to own in order to issue you your HTTPS cert. | # Lite Mode [[toc]](#toc) -The lite mode will only start postgresql and a single Galaxy process, without nginx, uwsgi or any other special feature from the normal mode. In particular there is no support for the export folder or any Magic Environment variables. 
+The lite mode will only start postgresql and a single Galaxy process, without nginx, gunicorn or any other special feature from the normal mode. In particular there is no support for the export folder or any Magic Environment variables. ```sh docker run -i -t -p 8080:8080 bgruening/galaxy-stable startup_lite @@ -775,18 +812,17 @@ RUN add-tool-shed --url 'http://testtoolshed.g2.bx.psu.edu/' --name 'Test Tool S RUN install-biojs msa # Adding the tool definitions to the container -ADD my_tool_list.yml $GALAXY_ROOT/my_tool_list.yml +ADD my_tool_list.yml $GALAXY_ROOT_DIR/my_tool_list.yml # Install deepTools -RUN install-tools $GALAXY_ROOT/my_tool_list.yml +RUN install-tools $GALAXY_ROOT_DIR/my_tool_list.yml # Mark folders as imported from the host. VOLUME ["/export/", "/data/", "/var/lib/docker"] -# Expose port 80 (webserver), 21 (FTP server), 8800 (Proxy) +# Expose port 80 (webserver), 21 (FTP server) EXPOSE :80 EXPOSE :21 -EXPOSE :8800 # Autostart script that is invoked during container start CMD ["/usr/bin/startup"] @@ -798,7 +834,7 @@ The RNA-workbench has advanced examples about: - populating Galaxy data libraries ```bash - setup-data-libraries -i $GALAXY_ROOT/library_data.yaml -g http://localhost:8080 + setup-data-libraries -i $GALAXY_ROOT_DIR/library_data.yaml -g http://localhost:8080 -u $GALAXY_DEFAULT_ADMIN_USER -p $GALAXY_DEFAULT_ADMIN_PASSWORD ``` @@ -863,12 +899,12 @@ In rare situations where you cannot share your tools but still want to include t - Create a `tool_conf.xml` file for your tools. - This should look similar to the main [`tool_conf.xml`](https://github.com/galaxyproject/galaxy/blob/dev/config/tool_conf.xml.sample) file, but references your tools from the new directory. In other words a tool entry should look like this ``. + This should look similar to the main [`tool_conf.xml`](https://github.com/galaxyproject/galaxy/blob/dev/lib/galaxy/config/sample/tool_conf.xml.sample) file, but references your tools from the new directory. In other words a tool entry should look like this ``. Your `tool_conf.xml` should be available from inside of the container. We assume you have it stored under `/local_tools/my_tools.xml`. - Add the new tool config file to the Galaxy configuration. - To make Galaxy aware of your new tool configuration file you need to add the path to `tool_config_file`, which is by default `#tool_config_file = config/tool_conf.xml,config/shed_tool_conf.xml`. You can do this during container start by setting the environment variable `-e GALAXY_CONFIG_TOOL_CONFIG_FILE=config/tool_conf.xml.sample,config/shed_tool_conf.xml.sample,/local_tools/my_tools.xml`. + To make Galaxy aware of your new tool configuration file you need to add the path to `tool_config_file`, which is set to `/etc/galaxy/tool_conf.xml`. You can do this during container start by setting the environment variable `-e GALAXY_CONFIG_TOOL_CONFIG_FILE=/etc/galaxy/tool_conf.xml,/local_tools/my_tools.xml`. # Users & Passwords [[toc]](#toc) @@ -880,7 +916,16 @@ If you want to create new users, please make sure to use the `/export/` volume. The proftpd server is configured to use the main galaxy PostgreSQL user to access the database and select the username and password. If you want to run the docker container in production, please do not forget to change the user credentials in `/etc/proftpd/proftpd.conf` too. -The Galaxy Report Webapp is `htpasswd` protected with username and password set to `admin`. +The Galaxy Report and Flower Webapps are `htpasswd` protected with username and password set to `admin`. 
+ +RabbitMQ is configured with: + - Admin username: `admin` + - Admin password: `admin` + - Galaxy vhost: `galaxy_internal` + - Galaxy username: `galaxy` + - Galaxy password: `galaxy` + - Flower username: `flower` + - Flower password: `flower` # Development [[toc]](#toc) diff --git a/docs/Running_jobs_outside_of_the_container.md b/docs/Running_jobs_outside_of_the_container.md index 35da13fab..00ea68311 100644 --- a/docs/Running_jobs_outside_of_the_container.md +++ b/docs/Running_jobs_outside_of_the_container.md @@ -120,7 +120,7 @@ cp job_conf.xml /data/galaxy/galaxy-central/config ``` We restart galaxy inside the container ```sh -docker exec galaxy-slurm-test supervisorctl restart galaxy: +docker exec galaxy-slurm-test galaxyctl restart ``` We should now be able to submit galaxy jobs through the slurm container. @@ -152,5 +152,5 @@ Now quit the slurm container, edit the job_conf.xml and set ``` and finally restart galaxy: ``` -docker exec galaxy-slurm-test supervisorctl restart galaxy: +docker exec galaxy-slurm-test galaxyctl restart ``` diff --git a/galaxy/Dockerfile b/galaxy/Dockerfile index ffe96856e..5662f2026 100644 --- a/galaxy/Dockerfile +++ b/galaxy/Dockerfile @@ -2,7 +2,7 @@ # # VERSION Galaxy-central -FROM ubuntu:18.04 +FROM ubuntu:22.04 MAINTAINER Björn A. Grüning, bjoern.gruening@gmail.com @@ -16,17 +16,18 @@ MAINTAINER Björn A. Grüning, bjoern.gruening@gmail.com ARG GALAXY_RELEASE ARG GALAXY_REPO -ENV GALAXY_RELEASE=${GALAXY_RELEASE:-release_20.09} \ +ENV GALAXY_RELEASE=${GALAXY_RELEASE:-release_24.1} \ GALAXY_REPO=${GALAXY_REPO:-https://github.com/galaxyproject/galaxy} \ - GALAXY_ROOT=/galaxy-central \ + GALAXY_ROOT_DIR=/galaxy-central \ GALAXY_CONFIG_DIR=/etc/galaxy \ EXPORT_DIR=/export \ DEBIAN_FRONTEND=noninteractive \ - PG_VERSION=11 + PG_VERSION=15 ENV GALAXY_CONFIG_FILE=$GALAXY_CONFIG_DIR/galaxy.yml \ GALAXY_CONFIG_JOB_CONFIG_FILE=$GALAXY_CONFIG_DIR/job_conf.xml \ GALAXY_CONFIG_JOB_METRICS_CONFIG_FILE=$GALAXY_CONFIG_DIR/job_metrics_conf.xml \ + GALAXY_CONFIG_TOOL_CONFIG_FILE=/etc/galaxy/tool_conf.xml \ GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH=/etc/galaxy/tool_data_table_conf.xml \ GALAXY_CONFIG_WATCH_TOOL_DATA_DIR=True \ GALAXY_CONFIG_TOOL_DEPENDENCY_DIR=$EXPORT_DIR/tool_deps \ @@ -48,28 +49,33 @@ ENV GALAXY_CONFIG_FILE=$GALAXY_CONFIG_DIR/galaxy.yml \ GALAXY_RUNNERS_ENABLE_CONDOR=False \ GALAXY_CONFIG_DATABASE_CONNECTION=postgresql://galaxy:galaxy@localhost:5432/galaxy?client_encoding=utf8 \ GALAXY_CONFIG_ADMIN_USERS=admin@galaxy.org \ - GALAXY_CONFIG_MASTER_API_KEY=HSNiugRFvgT574F43jZ7N9F3 \ + GALAXY_CONFIG_BOOTSTRAP_ADMIN_API_KEY=HSNiugRFvgT574F43jZ7N9F3 \ GALAXY_CONFIG_BRAND="Galaxy Docker Build" \ GALAXY_CONFIG_STATIC_ENABLED=False \ # Define the default postgresql database path PG_DATA_DIR_DEFAULT=/var/lib/postgresql/$PG_VERSION/main/ \ PG_CONF_DIR_DEFAULT=/etc/postgresql/$PG_VERSION/main/ \ PG_DATA_DIR_HOST=$EXPORT_DIR/postgresql/$PG_VERSION/main/ \ - # The following 2 ENV vars can be used to set the number of uwsgi processes and threads - UWSGI_PROCESSES=2 \ - UWSGI_THREADS=4 \ + # The following ENV var can be used to set the number of gunicorn workers + GUNICORN_WORKERS=2 \ + # The following ENV var can be used to set the number of celery workers + CELERY_WORKERS=2 \ # Set HTTPS to use a self-signed certificate (or your own certificate in $EXPORT_DIR/{server.key,server.crt}) USE_HTTPS=False \ - # Set USE_HTTPS_LENSENCRYPT and GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL to a domain that is reachable to get a letsencrypt certificate + # Set USE_HTTPS_LENSENCRYPT and 
GALAXY_DOMAIN to a domain that is reachable to get a letsencrypt certificate USE_HTTPS_LETSENCRYPT=False \ - GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://localhost \ + GALAXY_DOMAIN=localhost \ # Set the number of Galaxy handlers GALAXY_HANDLER_NUMPROCS=2 \ # Setting a standard encoding. This can get important for things like the unix sort tool. LC_ALL=en_US.UTF-8 \ LANG=en_US.UTF-8 \ NODE_OPTIONS=--max-old-space-size=4096 \ - GALAXY_CONDA_PREFIX=/tool_deps/_conda + GALAXY_CONDA_PREFIX=/tool_deps/_conda \ + GRAVITY_CONFIG_FILE=$GALAXY_CONFIG_DIR/gravity.yml \ + GALAXY_CONFIG_TUS_UPLOAD_STORE=/tmp/tus_upload_store \ + GALAXY_CONFIG_INTERACTIVETOOLS_MAP=$GALAXY_ROOT_DIR/database/interactivetools_map.sqlite \ + GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE=$GALAXY_CONFIG_DIR/tools_conf_interactive.xml # 16MB RUN echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup \ @@ -77,7 +83,7 @@ RUN echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02apt-speedup \ && apt-get -qq update && apt-get install --no-install-recommends -y locales \ && locale-gen en_US.UTF-8 && dpkg-reconfigure locales \ && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* ~/.cache/ \ - && rm -rf /tmp/* /root/.cache/ /var/cache/* /galaxy-central/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm/ + && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT_DIR/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm/ # Create the postgres user before apt-get does (with the configured UID/GID) to facilitate sharing $EXPORT_DIR/postgresql with non-Linux hosts RUN groupadd -r postgres -g $GALAXY_POSTGRES_GID \ @@ -86,39 +92,43 @@ RUN groupadd -r postgres -g $GALAXY_POSTGRES_GID \ && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - \ && add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \ && sudo add-apt-repository ppa:natefoo/slurm-drmaa \ + && curl -fsSL https://research.cs.wisc.edu/htcondor/repo/keys/HTCondor-current-Key | sudo apt-key add - \ + && sudo add-apt-repository "deb https://research.cs.wisc.edu/htcondor/repo/ubuntu/current jammy main" \ && apt-get update -qq \ ## && apt-get purge -y software-properties-common gpg-agent \ ## && apt-get install postgresql-10 --no-install-recommends -y \ && apt-get install nginx-extras nginx-common --no-install-recommends -y \ - && apt-get install docker-ce-cli --no-install-recommends -y \ + && apt-get install docker-ce --no-install-recommends -y \ && apt-get install slurm-client slurmd slurmctld slurm-drmaa1 --no-install-recommends -y \ && ln -s /usr/lib/slurm-drmaa/lib/libdrmaa.so.1 /usr/lib/slurm-drmaa/lib/libdrmaa.so \ - && apt-get install proftpd proftpd-mod-pgsql --no-install-recommends -y \ + && apt-get install proftpd proftpd-mod-pgsql proftpd-mod-crypto --no-install-recommends -y \ && apt-get install munge libmunge-dev --no-install-recommends -y \ && apt-get install nano --no-install-recommends -y \ && apt-get install htcondor --no-install-recommends -y \ && apt-get install git --no-install-recommends -y \ && apt-get install gridengine-common gridengine-drmaa1.0 --no-install-recommends -y \ && apt-get install rabbitmq-server --no-install-recommends -y \ + && apt-get install redis-server --no-install-recommends -y \ && apt-get install --no-install-recommends -y libswitch-perl supervisor \ - && apt-get purge -y software-properties-common gpg-agent apt-transport-https python3-minimal \ + && apt-get purge -y 
software-properties-common gpg-agent systemd \ && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ \ && mkdir -p /etc/supervisor/conf.d/ /var/log/supervisor/ \ # we will recreate this later ## && rm -rf $PG_DATA_DIR_DEFAULT \ && groupadd -r $GALAXY_USER -g $GALAXY_GID \ && useradd -u $GALAXY_UID -r -g $GALAXY_USER -d $GALAXY_HOME -c "Galaxy user" --shell /bin/bash $GALAXY_USER \ + && usermod -aG docker $GALAXY_USER \ && mkdir $EXPORT_DIR $GALAXY_HOME $GALAXY_LOGS_DIR && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_HOME $EXPORT_DIR $GALAXY_LOGS_DIR \ # cleanup dance && find /usr/lib/ -name '*.pyc' -delete \ - && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm/ + && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT_DIR/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm/ ADD ./bashrc $GALAXY_HOME/.bashrc # Install miniconda, then virtualenv from conda and then # download latest stable release of Galaxy. -RUN curl -s -L https://repo.anaconda.com/miniconda/Miniconda3-4.7.10-Linux-x86_64.sh > ~/miniconda.sh \ +RUN curl -s -L https://repo.anaconda.com/miniconda/Miniconda3-py310_24.5.0-0-Linux-x86_64.sh > ~/miniconda.sh \ && /bin/bash ~/miniconda.sh -b -p $GALAXY_CONDA_PREFIX/ \ && rm ~/miniconda.sh \ && ln -s $GALAXY_CONDA_PREFIX/etc/profile.d/conda.sh /etc/profile.d/conda.sh \ @@ -132,32 +142,40 @@ RUN curl -s -L https://repo.anaconda.com/miniconda/Miniconda3-4.7.10-Linux-x86_6 && chown $GALAXY_USER:$GALAXY_USER -R /tool_deps/ /etc/profile.d/conda.sh \ && conda clean --packages -t -i \ # cleanup dance - && find $GALAXY_ROOT -name '*.pyc' -delete | true \ + && find $GALAXY_ROOT_DIR -name '*.pyc' -delete | true \ && find /usr/lib/ -name '*.pyc' -delete | true \ && find $GALAXY_VIRTUAL_ENV -name '*.pyc' -delete | true \ - && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm + && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT_DIR/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm RUN cp $GALAXY_HOME/.bashrc ~/ -RUN mkdir $GALAXY_ROOT \ - && curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT \ +RUN mkdir $GALAXY_ROOT_DIR \ + && curl -L -s $GALAXY_REPO/archive/$GALAXY_RELEASE.tar.gz | tar xzf - --strip-components=1 -C $GALAXY_ROOT_DIR \ && PATH=$GALAXY_CONDA_PREFIX/bin/:$PATH virtualenv $GALAXY_VIRTUAL_ENV \ && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_VIRTUAL_ENV \ - && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_ROOT \ + && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_ROOT_DIR \ # Setup Galaxy configuration files. 
&& mkdir -p $GALAXY_CONFIG_DIR $GALAXY_CONFIG_DIR/web \ && chown -R $GALAXY_USER:$GALAXY_USER $GALAXY_CONFIG_DIR \ - && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm \ - && su $GALAXY_USER -c "cp $GALAXY_ROOT/config/galaxy.yml.sample $GALAXY_CONFIG_FILE" \ + && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT_DIR/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm \ + && su $GALAXY_USER -c "cp $GALAXY_ROOT_DIR/config/galaxy.yml.sample $GALAXY_CONFIG_FILE" \ + && su $GALAXY_USER -c "cp $GALAXY_ROOT_DIR/config/tool_conf.xml.sample $GALAXY_CONFIG_TOOL_CONFIG_FILE" \ # cleanup dance - && find $GALAXY_ROOT -name '*.pyc' -delete | true \ + && find $GALAXY_ROOT_DIR -name '*.pyc' -delete | true \ && find /usr/lib/ -name '*.pyc' -delete | true \ && find $GALAXY_VIRTUAL_ENV -name '*.pyc' -delete | true \ - && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm + && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT_DIR/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm -ADD ./reports_wsgi.ini.sample $GALAXY_CONFIG_DIR/reports_wsgi.ini +ADD ./reports.yml.sample $GALAXY_CONFIG_DIR/reports.yml ADD sample_tool_list.yaml $GALAXY_HOME/ephemeris/sample_tool_list.yaml + +# Activate Interactive Tools during runtime +ADD ./tools_conf_interactive.xml.sample $GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE + +RUN chown $GALAXY_USER:$GALAXY_USER $GALAXY_CONFIG_DIR/reports.yml $GALAXY_HOME/ephemeris/sample_tool_list.yaml $GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE +RUN chmod 0644 $GALAXY_CONFIG_DIR/reports.yml $GALAXY_HOME/ephemeris/sample_tool_list.yaml $GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE + ADD roles/ /ansible/roles ADD provision.yml /ansible/provision.yml ADD postgresql_provision.yml /ansible/postgresql_provision.yml @@ -166,11 +184,13 @@ ADD postgresql_provision.yml /ansible/postgresql_provision.yml RUN apt update -qq && apt install --no-install-recommends -y ansible dirmngr gpg gpg-agent \ && ansible-playbook /ansible/postgresql_provision.yml \ && apt purge ansible dirmngr gpg gpg-agent -y && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ + # Make python3 standard + && update-alternatives --install /usr/bin/python python /usr/bin/python3 10 \ # cleanup dance - && find $GALAXY_ROOT/ -name '*.pyc' -delete | true \ + && find $GALAXY_ROOT_DIR/ -name '*.pyc' -delete | true \ && find /usr/lib/ -name '*.pyc' -delete | true \ && find $GALAXY_VIRTUAL_ENV -name '*.pyc' -delete | true \ - && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm + && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT_DIR/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm # Include all needed scripts from the host @@ -181,40 +201,45 @@ ADD ./setup_postgresql.py /usr/local/bin/setup_postgresql.py # 2. Create DB-user 'galaxy' with password 'galaxy' in database 'galaxy' # 3. 
Create Galaxy Admin User 'admin@galaxy.org' with password 'admin' and API key 'admin' -RUN mkdir -p /shed_tools $EXPORT_DIR/ftp/ \ - && chown $GALAXY_USER:$GALAXY_USER /shed_tools $EXPORT_DIR/ftp \ +RUN mkdir -p /shed_tools $EXPORT_DIR/ftp/ $GALAXY_CONFIG_TUS_UPLOAD_STORE \ + && chown $GALAXY_USER:$GALAXY_USER /shed_tools $EXPORT_DIR/ftp $GALAXY_CONFIG_TUS_UPLOAD_STORE \ && ln -s /tool_deps/ $EXPORT_DIR/tool_deps \ # Configure Galaxy to use the Tool Shed && chown $GALAXY_USER:$GALAXY_USER $EXPORT_DIR/tool_deps \ - && apt update -qq && apt install --no-install-recommends -y ansible \ + && apt update -qq && apt install --no-install-recommends -y ansible gpg gpg-agent g++ make \ && ansible-playbook /ansible/provision.yml \ --extra-vars galaxy_venv_dir=$GALAXY_VIRTUAL_ENV \ --extra-vars galaxy_log_dir=$GALAXY_LOGS_DIR \ --extra-vars galaxy_user_name=$GALAXY_USER \ --extra-vars galaxy_config_file=$GALAXY_CONFIG_FILE \ --extra-vars galaxy_config_dir=$GALAXY_CONFIG_DIR \ + --extra-vars gravity_config_file=$GRAVITY_CONFIG_FILE \ --extra-vars galaxy_job_conf_path=$GALAXY_CONFIG_JOB_CONFIG_FILE \ --extra-vars galaxy_job_metrics_conf_path=$GALAXY_CONFIG_JOB_METRICS_CONFIG_FILE \ --extra-vars supervisor_manage_slurm="" \ --extra-vars galaxy_extras_config_condor=True \ --extra-vars galaxy_extras_config_condor_docker=True \ --extra-vars galaxy_extras_config_rabbitmq=True \ + --extra-vars galaxy_extras_config_redis=True \ + --extra-vars galaxy_extras_config_flower=True \ + --extra-vars galaxy_extras_config_tusd=True \ --extra-vars galaxy_extras_config_cvmfs=True \ - --extra-vars galaxy_extras_config_uwsgi=False \ + --extra-vars galaxy_extras_config_gravity=True \ --extra-vars proftpd_db_connection=galaxy@galaxy \ --extra-vars proftpd_files_dir=$EXPORT_DIR/ftp \ --extra-vars proftpd_use_sftp=True \ --extra-vars galaxy_extras_docker_legacy=False \ - --extra-vars galaxy_minimum_version=19.01 \ --extra-vars supervisor_postgres_config_path=$PG_CONF_DIR_DEFAULT/postgresql.conf \ --extra-vars supervisor_postgres_autostart=false \ --extra-vars nginx_use_remote_header=True \ + --extra-vars tus_upload_store_path=$GALAXY_CONFIG_TUS_UPLOAD_STORE \ + --extra-vars gx_it_proxy_sessions_path=$GALAXY_CONFIG_INTERACTIVETOOLS_MAP \ --tags=galaxyextras,cvmfs -c local \ && . $GALAXY_VIRTUAL_ENV/bin/activate \ && pip install WeasyPrint \ && deactivate \ # TODO: no clue why this is needed here again - && cd $GALAXY_ROOT && ./scripts/common_startup.sh \ + && cd $GALAXY_ROOT_DIR && ./scripts/common_startup.sh \ && cd config && find . -name 'node_modules' -type d -prune -exec rm -rf '{}' + \ && find . 
-name '.cache' -type d -prune -exec rm -rf '{}' + \ && cd / \ @@ -222,13 +247,13 @@ RUN mkdir -p /shed_tools $EXPORT_DIR/ftp/ \ && python /usr/local/bin/setup_postgresql.py --dbuser galaxy --dbpassword galaxy --db-name galaxy --dbpath $PG_DATA_DIR_DEFAULT --dbversion $PG_VERSION \ && service postgresql start \ && service postgresql stop \ - && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ \ + && apt purge gpg gpg-agent g++ make gcc -y && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ \ # cleanup dance - && find $GALAXY_ROOT/ -name '*.pyc' -delete | true \ + && find $GALAXY_ROOT_DIR/ -name '*.pyc' -delete | true \ && find /usr/lib/ -name '*.pyc' -delete | true \ && find /var/log/ -name '*.log' -delete | true \ && find $GALAXY_VIRTUAL_ENV -name '*.pyc' -delete | true \ - && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm + && rm -rf /tmp/* /root/.cache/ /var/cache/* $GALAXY_ROOT_DIR/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm RUN touch /var/log/condor/StartLog /var/log/condor/StarterLog /var/log/condor/CollectorLog /var/log/condor/NegotiatorLog && \ mkdir -p /var/run/condor/ /var/lock/condor/ && \ @@ -237,31 +262,29 @@ RUN touch /var/log/condor/StartLog /var/log/condor/StarterLog /var/log/condor/Co # The following commands will be executed as the galaxy user USER $GALAXY_USER -WORKDIR $GALAXY_ROOT +WORKDIR $GALAXY_ROOT_DIR # Updating genome informations from UCSC -#RUN export GALAXY=$GALAXY_ROOT && sh ./cron/updateucsc.sh.sample +#RUN export GALAXY=$GALAXY_ROOT_DIR && sh ./cron/updateucsc.sh.sample ENV GALAXY_CONFIG_JOB_WORKING_DIRECTORY=$EXPORT_DIR/galaxy-central/database/job_working_directory \ GALAXY_CONFIG_FILE_PATH=$EXPORT_DIR/galaxy-central/database/files \ GALAXY_CONFIG_NEW_FILE_PATH=$EXPORT_DIR/galaxy-central/database/files \ GALAXY_CONFIG_TEMPLATE_CACHE_PATH=$EXPORT_DIR/galaxy-central/database/compiled_templates \ GALAXY_CONFIG_CITATION_CACHE_DATA_DIR=$EXPORT_DIR/galaxy-central/database/citations/data \ - GALAXY_CONFIG_CLUSTER_FILES_DIRECTORY=$EXPORT_DIR/galaxy-central/database/pbs \ GALAXY_CONFIG_FTP_UPLOAD_DIR=$EXPORT_DIR/ftp \ GALAXY_CONFIG_FTP_UPLOAD_SITE=galaxy.docker.org \ GALAXY_CONFIG_USE_PBKDF2=False \ GALAXY_CONFIG_NGINX_X_ACCEL_REDIRECT_BASE=/_x_accel_redirect \ - GALAXY_CONFIG_NGINX_X_ARCHIVE_FILES_BASE=/_x_accel_redirect \ GALAXY_CONFIG_DYNAMIC_PROXY_MANAGE=False \ GALAXY_CONFIG_VISUALIZATION_PLUGINS_DIRECTORY=config/plugins/visualizations \ - GALAXY_CONFIG_TRUST_IPYTHON_NOTEBOOK_CONVERSION=True \ - GALAXY_CONFIG_TOOLFORM_UPGRADE=True \ + GALAXY_CONFIG_TRUST_JUPYTER_NOTEBOOK_CONVERSION=True \ GALAXY_CONFIG_SANITIZE_ALL_HTML=False \ - GALAXY_CONFIG_TOOLFORM_UPGRADE=True \ GALAXY_CONFIG_WELCOME_URL=$GALAXY_CONFIG_DIR/web/welcome.html \ GALAXY_CONFIG_OVERRIDE_DEBUG=False \ GALAXY_CONFIG_ENABLE_QUOTAS=True \ + GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=http://$GALAXY_DOMAIN \ + GALAXY_CONFIG_OUTPUTS_TO_WORKING_DIRECTORY=True \ # We need to set $HOME for some Tool Shed tools (e.g Perl libs with $HOME/.cpan) HOME=$GALAXY_HOME \ GALAXY_CONDA_PREFIX=$GALAXY_CONFIG_TOOL_DEPENDENCY_DIR/_conda @@ -275,16 +298,16 @@ ADD welcome.html $GALAXY_CONFIG_DIR/web/welcome.html # && . 
$GALAXY_VIRTUAL_ENV/bin/activate \ # && python ./scripts/manage_tool_dependencies.py -c "$GALAXY_CONFIG_FILE" init_if_needed \ # # cleanup dance -# && find $GALAXY_ROOT/ -name '*.pyc' -delete \ +# && find $GALAXY_ROOT_DIR/ -name '*.pyc' -delete \ # && find /usr/lib/ -name '*.pyc' -delete \ # && find $GALAXY_CONDA_PREFIX/ -name '*.pyc' -delete \ # && find $GALAXY_VIRTUAL_ENV -name '*.pyc' -delete \ -# && rm -rf /tmp/* $GALAXY_ROOT/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm +# && rm -rf /tmp/* $GALAXY_ROOT_DIR/client/node_modules/ $GALAXY_VIRTUAL_ENV/src/ /home/galaxy/.cache/ /home/galaxy/.npm # Install all required Node dependencies. This is required to get proxy support to work for Interactive Environments -#cd $GALAXY_ROOT/lib/galaxy/web/proxy/js && \ +#cd $GALAXY_ROOT_DIR/lib/galaxy/web/proxy/js && \ #npm install && \ -#rm -rf ~/.cache/ $GALAXY_ROOT/client/node_modules/ +#rm -rf ~/.cache/ $GALAXY_ROOT_DIR/client/node_modules/ # Switch back to User root USER root @@ -293,6 +316,7 @@ USER root # Activate additional Tool Sheds # Activate the Test Tool Shed during runtime, useful for testing repositories. ADD ./tool_sheds_conf.xml $GALAXY_HOME/tool_sheds_conf.xml +RUN chown $GALAXY_USER:$GALAXY_USER $GALAXY_HOME/tool_sheds_conf.xml && chmod 0644 $GALAXY_HOME/tool_sheds_conf.xml # Script that enables easier downstream installation of tools (e.g. for different Galaxy Docker flavours) ADD install_tools_wrapper.sh /usr/bin/install-tools @@ -309,20 +333,19 @@ RUN echo "DISCARD_SESSION_KEYRING_ON_STARTUP=False" > /etc/condor/condor_config. ADD https://github.com/krallin/tini/releases/download/v0.18.0/tini /sbin/tini RUN chmod +x /sbin/tini -# https://stackoverflow.com/questions/62250160/uwsgi-runtimeerror-cannot-release-un-acquired-lock -ADD run.sh $GALAXY_ROOT/run.sh -RUN chmod +x $GALAXY_ROOT/run.sh && sed -i 's/py-call-osafterfork.*//g' /etc/galaxy/galaxy.yml +ADD run.sh $GALAXY_ROOT_DIR/run.sh +RUN chmod +x $GALAXY_ROOT_DIR/run.sh && \ + chown $GALAXY_USER:$GALAXY_USER $GALAXY_ROOT_DIR/run.sh # This needs to happen here and not above, otherwise the Galaxy start # (without running the startup.sh script) will crash because integrated_tool_panel.xml could not be found. ENV GALAXY_CONFIG_INTEGRATED_TOOL_PANEL_CONFIG $EXPORT_DIR/galaxy-central/integrated_tool_panel.xml -# Expose port 80, 443 (webserver), 21 (FTP server), 8800 (Proxy), 9002 (supvisord web app) -EXPOSE :21 -EXPOSE :80 -EXPOSE :443 -EXPOSE :8800 -EXPOSE :9002 +# Expose port 80, 443 (webserver), 21 (FTP server), 9002 (supvisord web app) +EXPOSE 21 +EXPOSE 80 +EXPOSE 443 +EXPOSE 9002 # Mark folders as imported from the host. VOLUME ["/export/", "/data/", "/var/lib/docker"] @@ -332,10 +355,15 @@ ENV SUPERVISOR_POSTGRES_AUTOSTART=True \ SUPERVISOR_MANAGE_POSTGRES=True \ SUPERVISOR_MANAGE_CRON=True \ SUPERVISOR_MANAGE_PROFTP=True \ - SUPERVISOR_MANAGE_REPORTS=True \ - SUPERVISOR_MANAGE_IE_PROXY=True \ SUPERVISOR_MANAGE_CONDOR=True \ SUPERVISOR_MANAGE_SLURM= \ + SUPERVISOR_MANAGE_RABBITMQ=True \ + SUPERVISOR_MANAGE_REDIS=True \ + SUPERVISOR_MANAGE_FLOWER=True \ + GRAVITY_MANAGE_CELERY=True \ + GRAVITY_MANAGE_GX_IT_PROXY=True \ + GRAVITY_MANAGE_TUSD=True \ + GRAVITY_MANAGE_REPORTS=True \ HOST_DOCKER_LEGACY= \ GALAXY_EXTRAS_CONFIG_POSTGRES=True \ STARTUP_EXPORT_USER_FILES=True diff --git a/galaxy/install_biojs_vis.sh b/galaxy/install_biojs_vis.sh index 396bc423d..b5a593579 100644 --- a/galaxy/install_biojs_vis.sh +++ b/galaxy/install_biojs_vis.sh @@ -1,12 +1,14 @@ #!/bin/bash +. 
$GALAXY_VIRTUAL_ENV/bin/activate + mkdir ./biojs_install_temp cd ./biojs_install_temp npm install biojs2galaxy for vis in "$@"; do echo "Installing BioJS Visualization:\t $vis" - ./node_modules/biojs2galaxy/biojs2galaxy.js $vis -o $GALAXY_ROOT/config/plugins/visualizations/ + ./node_modules/biojs2galaxy/biojs2galaxy.js $vis -o $GALAXY_ROOT_DIR/config/plugins/visualizations/ done cd .. diff --git a/galaxy/install_tools_wrapper.sh b/galaxy/install_tools_wrapper.sh index b1cd5b67b..197553419 100644 --- a/galaxy/install_tools_wrapper.sh +++ b/galaxy/install_tools_wrapper.sh @@ -35,7 +35,7 @@ else ./run.sh -d $install_log --pidfile galaxy_install.pid --http-timeout 3000" galaxy_install_pid=`cat galaxy_install.pid` - galaxy-wait -g http://localhost:$PORT -v --timeout 120 + galaxy-wait -g http://localhost:$PORT -v --timeout 600 fi # Create the admin user if not already done @@ -44,7 +44,7 @@ fi if [[ ! -z $GALAXY_DEFAULT_ADMIN_USER ]] then ( - cd $GALAXY_ROOT + cd $GALAXY_ROOT_DIR . $GALAXY_VIRTUAL_ENV/bin/activate echo "Creating admin user $GALAXY_DEFAULT_ADMIN_USER with key $GALAXY_DEFAULT_ADMIN_KEY and password $GALAXY_DEFAULT_ADMIN_PASSWORD if not existing" python /usr/local/bin/create_galaxy_user.py --user "$GALAXY_DEFAULT_ADMIN_EMAIL" --password "$GALAXY_DEFAULT_ADMIN_PASSWORD" \ diff --git a/galaxy/postgresql_provision.yml b/galaxy/postgresql_provision.yml index 01198c9f5..3f9b31b1b 100644 --- a/galaxy/postgresql_provision.yml +++ b/galaxy/postgresql_provision.yml @@ -3,7 +3,7 @@ remote_user: root vars: postgresql_backup_local_dir: /export/postgresql_backup/ - postgresql_version: 11 + postgresql_version: 15 postgresql_flavor: pgdg postgresql_conf: listen_addresses: "'*'" diff --git a/galaxy/provision.yml b/galaxy/provision.yml index 5b28fa1dc..bde9ae7ed 100644 --- a/galaxy/provision.yml +++ b/galaxy/provision.yml @@ -1,7 +1,7 @@ - hosts: localhost connection: local vars: - postgresql_version: 11 + postgresql_version: 15 roles: - role: galaxyprojectdotorg.galaxyextras tags: galaxyextras diff --git a/galaxy/reports.yml.sample b/galaxy/reports.yml.sample new file mode 100644 index 000000000..f7f2172e0 --- /dev/null +++ b/galaxy/reports.yml.sample @@ -0,0 +1,39 @@ +reports: + + # Verbosity of console log messages. Acceptable values can be found + # here: https://docs.python.org/library/logging.html#logging-levels + #log_level: DEBUG + + # Database connection. Galaxy Reports are intended for production + # Galaxy instances, so sqlite (and the default value below) is not + # supported. An SQLAlchemy connection string should be used specify an + # external database. + #database_connection: postgresql://galaxy:galaxy@localhost:5432/galaxy?client_encoding=utf8 + + # Where dataset files are stored. + #file_path: database/files + + # Where temporary files are stored. + #new_file_path: database/tmp + + # Mako templates are compiled as needed and cached for reuse, this + # directory is used for the cache + #template_cache_path: database/compiled_templates/reports + + # Write thread status periodically to 'heartbeat.log' (careful, uses + # disk space rapidly!) + #use_heartbeat: true + + # Mail + #smtp_server: yourserver@yourfacility.edu + + # Mail + #error_email_to: your_bugs@bx.psu.edu + + # Enables GDPR Compliance mode. This makes several changes to the way + # Galaxy logs and exposes data externally such as removing + # emails/usernames from logs and bug reports. + # You are responsible for removing personal data from backups. 
+ # Please read the GDPR section under the special topics area of the + # admin documentation. + #enable_beta_gdpr: false diff --git a/galaxy/reports_wsgi.ini.sample b/galaxy/reports_wsgi.ini.sample deleted file mode 100644 index 8bac450a0..000000000 --- a/galaxy/reports_wsgi.ini.sample +++ /dev/null @@ -1,100 +0,0 @@ -# ---- HTTP Server ---------------------------------------------------------- - -[server:main] - -use = egg:Paste#http -port = 9001 -host = 127.0.0.1 -use_threadpool = true -threadpool_workers = 10 - -# ---- Filters -------------------------------------------------------------- - -# Filters sit between Galaxy and the HTTP server. - -# These filters are disabled by default. They can be enabled with -# 'filter-with' in the [app:main] section below. - -# Define the proxy-prefix filter. -[filter:proxy-prefix] -use = egg:PasteDeploy#prefix -prefix = /reports - -# ---- Galaxy Webapps Report Interface ------------------------------------------------- - -[app:main] - -# -- Application and filtering - -# If running behind a proxy server and Galaxy is served from a subdirectory, -# enable the proxy-prefix filter and set the prefix in the -# [filter:proxy-prefix] section above. -filter-with = proxy-prefix - -# If proxy-prefix is enabled and you're running more than one Galaxy instance -# behind one hostname, you will want to set this to the same path as the prefix -# in the filter above. This value becomes the "path" attribute set in the -# cookie so the cookies from each instance will not clobber each other. -#cookie_path = None - -# -- Report - -# Specifies the factory for the universe WSGI application -paste.app_factory = galaxy.webapps.reports.buildapp:app_factory -log_level = DEBUG - -# Database connection -# Galaxy reports are intended for production Galaxy instances, so sqlite is not supported. -# You may use a SQLAlchemy connection string to specify an external database. -# database_connection = postgres:///galaxy_test?user=postgres&password=postgres - -# Where dataset files are saved -#file_path = database/files -# Temporary storage for additional datasets, this should be shared through the cluster -#new_file_path = database/tmp - -# Mako templates are compiled as needed and cached for reuse, this directory is -# used for the cache -#template_cache_path = database/compiled_templates/reports - -# Session support (beaker) -use_beaker_session = True -session_type = memory -session_data_dir = %(here)s/database/beaker_sessions -session_key = galaxysessions -session_secret = changethisinproduction - -# Configuration for debugging middleware -# debug = true -use_lint = false - -# NEVER enable this on a public site (even test or QA) -# use_interactive = true - -# path to sendmail -sendmail_path = /usr/sbin/sendmail - -# Address to join mailing list -mailing_join_addr = galaxy-user-join@bx.psu.edu - -# Write thread status periodically to 'heartbeat.log' (careful, uses disk space rapidly!) 
-## use_heartbeat = True - -# Profiling middleware (cProfile based) -## use_profile = True - -# Mail -# smtp_server = yourserver@yourfacility.edu -# error_email_to = your_bugs@bx.psu.edu - -# Use the new iframe / javascript based layout -use_new_layout = true - -# Serving static files (needed if running standalone) -# static_enabled = True -# static_cache_time = 360 -# static_dir = %(here)s/static/ -# static_images_dir = %(here)s/static/images -# static_favicon_dir = %(here)s/static/favicon.ico -# static_scripts_dir = %(here)s/static/scripts/ -# static_style_dir = %(here)s/static/june_2007_style/blue diff --git a/galaxy/roles/galaxy-postgresql b/galaxy/roles/galaxy-postgresql index 1507fbe5e..52220795d 160000 --- a/galaxy/roles/galaxy-postgresql +++ b/galaxy/roles/galaxy-postgresql @@ -1 +1 @@ -Subproject commit 1507fbe5eda946fc9f4c9aabef4e73a9b11a8315 +Subproject commit 52220795dd3aa04c026846442c08e0561f720760 diff --git a/galaxy/run.sh b/galaxy/run.sh index fa54e0f25..db6a9c2a5 100755 --- a/galaxy/run.sh +++ b/galaxy/run.sh @@ -11,7 +11,6 @@ cd "$(dirname "$0")" . ./scripts/common_startup_functions.sh - # If there is a file that defines a shell environment specific to this # instance of Galaxy, source the file. if [ -z "$GALAXY_LOCAL_ENV_FILE" ]; @@ -35,64 +34,25 @@ run_common_start_up setup_python - if [ ! -z "$GALAXY_RUN_WITH_TEST_TOOLS" ]; then - export GALAXY_CONFIG_OVERRIDE_TOOL_CONFIG_FILE="$(pwd)/test/functional/tools/samples_tool_conf.xml" + export GALAXY_CONFIG_OVERRIDE_TOOL_CONFIG_FILE="$(pwd)/test/functional/tools/sample_tool_conf.xml" export GALAXY_CONFIG_ENABLE_BETA_WORKFLOW_MODULES="true" export GALAXY_CONFIG_OVERRIDE_ENABLE_BETA_TOOL_FORMATS="true" export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE="true" export GALAXY_CONFIG_OVERRIDE_WEBHOOKS_DIR="test/functional/webhooks" + export GALAXY_CONFIG_OVERRIDE_PANEL_VIEWS_DIR="$(pwd)/test/integration/panel_views_1/" fi set_galaxy_config_file_var - if [ "$INITIALIZE_TOOL_DEPENDENCIES" -eq 1 ]; then # Install Conda environment if needed. python ./scripts/manage_tool_dependencies.py init_if_needed fi -[ -n "$GALAXY_UWSGI" ] && APP_WEBSERVER='uwsgi' find_server "${GALAXY_CONFIG_FILE:-none}" galaxy -server_args=`echo $server_args | sed 's/--py-call-osafterfork//g'` - -if [ "$run_server" = "python" -a -n "$GALAXY_RUN_ALL" ]; then - servers=$(sed -n 's/^\[server:\(.*\)\]/\1/ p' "$GALAXY_CONFIG_FILE" | xargs echo) - if [ -z "$stop_daemon_arg_set" -a -z "$daemon_or_restart_arg_set" ]; then - echo "ERROR: \$GALAXY_RUN_ALL cannot be used without the '--daemon', '--stop-daemon', 'restart', 'start' or 'stop' arguments to run.sh" - exit 1 - fi - for server in $servers; do - echo "Executing: python $server_args --server-name=\"$server\" --pid-file=\"$server.pid\" --log-file=\"$server.log\"" - eval python $server_args --server-name="$server" --pid-file="$server.pid" --log-file="$server.log" - if [ -n "$wait_arg_set" -a -n "$daemon_or_restart_arg_set" ]; then - while true; do - sleep 1 - # Grab the current pid from the pid file and remove any trailing space - if ! 
current_pid_in_file=$(sed -e 's/[[:space:]]*$//' "$server.pid"); then - echo "A Galaxy process died, interrupting" >&2 - exit 1 - fi - if [ -n "$current_pid_in_file" ]; then - echo "Found PID $current_pid_in_file in '$server.pid', monitoring '$server.log'" - else - echo "No PID found in '$server.pid' yet" - continue - fi - # Search for all pids in the logs and tail for the last one - latest_pid=$(grep '^Starting server in PID [0-9]\+\.$' "$server.log" | sed 's/^Starting server in PID \([0-9]\{1,\}\).$/\1/' | tail -n 1) - # If they're equivalent, then the current pid file agrees with our logs - # and we've succesfully started - [ -n "$latest_pid" ] && [ "$latest_pid" -eq "$current_pid_in_file" ] && break - done - echo - fi - done -else - - echo "Executing: $run_server $server_args" - # args are properly quoted so use eval - eval $run_server $server_args -fi +echo "Executing: $run_server $server_args" +# args are properly quoted so use eval +eval GALAXY_ROOT_DIR="." $run_server $server_args diff --git a/galaxy/setup_postgresql.py b/galaxy/setup_postgresql.py index 66b4aea00..238469b67 100644 --- a/galaxy/setup_postgresql.py +++ b/galaxy/setup_postgresql.py @@ -9,31 +9,34 @@ def pg_ctl(database_path, database_version, mod='start'): Start/Stop PostgreSQL with variable data_directory. mod = [start, end, restart, reload] """ - pg_conf = '/etc/postgresql/%s/main/postgresql.conf' % database_version - new_data_directory = "'%s'" % database_path - cmd = 'sed -i "s|data_directory = .*|data_directory = %s|g" %s' % (new_data_directory, pg_conf) + pg_conf = f'/etc/postgresql/{database_version}/main/postgresql.conf' + new_data_directory = f"'{database_path}'" + cmd = f'sed -i "s|data_directory = .*|data_directory = {new_data_directory}|g" {pg_conf}' subprocess.call(cmd, shell=True) - subprocess.call('service postgresql %s' % mod, shell=True) + subprocess.call(f'service postgresql {mod}', shell=True) def set_pg_permission(database_path): """ Set the correct permissions for a newly created PostgreSQL data_directory. """ - subprocess.call('chown -R postgres:postgres %s' % database_path, shell=True) - subprocess.call('chmod -R 0700 %s' % database_path, shell=True) + subprocess.call(f'chown -R postgres:postgres {database_path}', shell=True) + subprocess.call(f'chmod -R 0700 {database_path}', shell=True) def create_pg_db(user, password, database, database_path, database_version): """ Initialize PostgreSQL Database, add database user und create the Galaxy Database. 
""" - pg_bin = "/usr/lib/postgresql/%s/bin/" % database_version + pg_bin = f"/usr/lib/postgresql/{database_version}/bin/" os.makedirs(database_path) set_pg_permission(database_path) # initialize a new postgres database - subprocess.call("su - postgres -c '%s --auth=trust --encoding UTF8 --pgdata=%s'" % (os.path.join(pg_bin, 'initdb'), - database_path), shell=True) + subprocess.call( + f"su - postgres -c '{os.path.join(pg_bin, 'initdb')} " + f"--auth=trust --encoding UTF8 --pgdata={database_path}'", + shell=True + ) shutil.copy('/etc/ssl/certs/ssl-cert-snakeoil.pem', os.path.join(database_path, 'server.crt')) shutil.copy('/etc/ssl/private/ssl-cert-snakeoil.key', os.path.join(database_path, 'server.key')) @@ -43,10 +46,10 @@ def create_pg_db(user, password, database, database_path, database_version): # change data_directory in postgresql.conf and start the service with the new location pg_ctl(database_path, database_version, 'start') - subprocess.call("""su - postgres -c "psql --command \\"CREATE USER %s WITH SUPERUSER PASSWORD '%s'\\";" - """ % (user, password), shell=True) + subprocess.call(f"""su - postgres -c "psql --command \\"CREATE USER {user} WITH SUPERUSER PASSWORD '{password}'\\";" + """, shell=True) - subprocess.call("su - postgres -c 'createdb -O %s %s'" % (user, database), shell=True) + subprocess.call(f"su - postgres -c 'createdb -O {user} {database}'", shell=True) subprocess.call('service postgresql stop', shell=True) @@ -56,19 +59,19 @@ def create_pg_db(user, password, database, database_path, database_version): parser.add_argument("--dbuser", required=True, help="Username of the Galaxy Database Administrator. That name will be specified in the " - "universe_wsgi.xml file.") + "galaxy.yml file.") parser.add_argument("--dbpassword", required=True, help="Password of the Galaxy Database Administrator. That name will be specified in the " - "universe_wsgi.xml file.") + "galaxy.yml file.") parser.add_argument("--db-name", dest='db_name', required=True, - help="Galaxy Database name. That name will be specified in the universe_wsgi.xml file.") + help="Galaxy Database name. That name will be specified in the galaxy.yml file.") parser.add_argument("--dbpath", help="Galaxy Database path.") - parser.add_argument("--dbversion", default='11', + parser.add_argument("--dbversion", default='15', help="Postgresql server major version.") options = parser.parse_args() diff --git a/galaxy/startup.sh b/galaxy/startup.sh index 168c9ee64..acf283c58 100755 --- a/galaxy/startup.sh +++ b/galaxy/startup.sh @@ -5,16 +5,13 @@ if [ -d "/export/galaxy-central/tool_deps/" ] && [ ! -L "/export/galaxy-central/tool_deps/" ]; then mkdir -p /export/tool_deps/ mv /export/galaxy-central/tool_deps /export/ - ln -s /export/tool_deps/ $GALAXY_ROOT/ + ln -s /export/tool_deps/ $GALAXY_ROOT_DIR/ fi # This is needed for Docker compose to have a unified alias for the main container. # Modifying /etc/hosts can only happen during runtime not during build-time echo "127.0.0.1 galaxy" >> /etc/hosts -# Set number of Galaxy handlers via GALAXY_HANDLER_NUMPROCS or default to 2 -ansible localhost -m ini_file -a "dest=/etc/supervisor/conf.d/galaxy.conf section=program:handler option=numprocs value=${GALAXY_HANDLER_NUMPROCS:-2}" &> /dev/null - # If the Galaxy config file is not in the expected place, copy from the sample # and hope for the best (that the admin has done all the setup through env vars.) if [ ! -f $GALAXY_CONFIG_FILE ] @@ -23,47 +20,96 @@ if [ ! 
-f $GALAXY_CONFIG_FILE ] cp /export/config/galaxy${GALAXY_CONFIG_FILE: -4}.sample $GALAXY_CONFIG_FILE fi +# Set number of Gunicorn workers via GUNICORN_WORKERS or default to 2 +python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.gunicorn.workers" "${GUNICORN_WORKERS:-2}" &> /dev/null + +# Set number of Celery workers via CELERY_WORKERS or default to 2 +python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.celery.concurrency" "${CELERY_WORKERS:-2}" &> /dev/null + +# Set number of Galaxy handlers via GALAXY_HANDLER_NUMPROCS or default to 2 +python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.handlers.handler.processes" "${GALAXY_HANDLER_NUMPROCS:-2}" &> /dev/null + +# Initialize variables for optional ansible parameters +ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX="" +ANSIBLE_TAG_HTTPS_PROXY_PREFIX="" + # Configure proxy prefix filtering if [[ ! -z $PROXY_PREFIX ]] - then - if [ ${GALAXY_CONFIG_FILE: -4} == ".ini" ] - then - ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_FILE} section=filter:proxy-prefix option=prefix value=${PROXY_PREFIX}" &> /dev/null - ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_FILE} section=app:main option=filter-with value=proxy-prefix" &> /dev/null - else - ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ module:' state=absent" &> /dev/null - ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ socket:' state=absent" &> /dev/null - ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ mount:' state=absent" &> /dev/null - ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} regexp='^ manage-script-name:' state=absent" &> /dev/null - ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} insertafter='^uwsgi:' line=' manage-script-name: true'" &> /dev/null - ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} insertafter='^uwsgi:' line=' mount: ${PROXY_PREFIX}=galaxy.webapps.galaxy.buildapp:uwsgi_app()'" &> /dev/null - ansible localhost -m lineinfile -a "path=${GALAXY_CONFIG_FILE} insertafter='^uwsgi:' line=' socket: unix:///srv/galaxy/var/uwsgi.sock'" &> /dev/null - - # Also set SCRIPT_NAME. 
It's not always necessary due to manage-script-name: true in galaxy.yml, but it makes life easier in this container + it does no harm - ansible localhost -m lineinfile -a "path=/etc/nginx/conf.d/uwsgi.conf regexp='^ uwsgi_param SCRIPT_NAME' state=absent" &> /dev/null - ansible localhost -m lineinfile -a "path=/etc/nginx/conf.d/uwsgi.conf insertafter='^ include uwsgi_params' line=' uwsgi_param SCRIPT_NAME ${PROXY_PREFIX};'" &> /dev/null - fi +then + echo "Configuring with proxy prefix: $PROXY_PREFIX" + export GALAXY_CONFIG_GALAXY_URL_PREFIX="$PROXY_PREFIX" + export GALAXY_CONFIG_INTERACTIVETOOLS_BASE_PATH="$PROXY_PREFIX" + + python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.reports.url_prefix" "$PROXY_PREFIX/reports" &> /dev/null + + python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.tusd.extra_args" "-behind-proxy -base-path $PROXY_PREFIX/api/upload/resumable_upload" &> /dev/null - ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_DIR}/reports_wsgi.ini section=filter:proxy-prefix option=prefix value=${PROXY_PREFIX}/reports" &> /dev/null - ansible localhost -m ini_file -a "dest=${GALAXY_CONFIG_DIR}/reports_wsgi.ini section=app:main option=filter-with value=proxy-prefix" &> /dev/null + ansible localhost -m replace -a "path=/etc/flower/flowerconfig.py regexp='^url_prefix.*' replace='url_prefix = \"$PROXY_PREFIX/flower\"'" &> /dev/null # Fix path to html assets ansible localhost -m replace -a "dest=$GALAXY_CONFIG_DIR/web/welcome.html regexp='(href=\"|\')[/\\w]*(/static)' replace='\\1${PROXY_PREFIX}\\2'" &> /dev/null - + # Set some other vars based on that prefix - if [ "x$GALAXY_CONFIG_COOKIE_PATH" == "x" ] - then - export GALAXY_CONFIG_COOKIE_PATH="$PROXY_PREFIX" - fi - if [ "x$GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX" == "x" ] - then + if [[ -z "$GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX" ]] + then export GALAXY_CONFIG_DYNAMIC_PROXY_PREFIX="$PROXY_PREFIX/gie_proxy" fi - # Change the defaults nginx upload/x-accel paths - if [ "$GALAXY_CONFIG_NGINX_UPLOAD_PATH" == "/_upload" ] - then - export GALAXY_CONFIG_NGINX_UPLOAD_PATH="${PROXY_PREFIX}${GALAXY_CONFIG_NGINX_UPLOAD_PATH}" + if [[ ! 
-z $GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL ]] + then + export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL="${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL}${PROXY_PREFIX}" + fi + + if [[ "$USE_HTTPS_LETSENCRYPT" != "False" || "$USE_HTTPS" != "False" ]] + then + ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX="--extra-vars nginx_prefix_location=$PROXY_PREFIX" + ANSIBLE_TAG_HTTPS_PROXY_PREFIX="proxy_prefix" + else + ansible-playbook -c local /ansible/provision.yml \ + --extra-vars nginx_prefix_location="$PROXY_PREFIX" \ + --tags proxy_prefix + fi +fi + +if [ "$USE_HTTPS_LETSENCRYPT" != "False" ] +then + echo "Settting up letsencrypt" + ansible-playbook -c local /ansible/provision.yml \ + --extra-vars galaxy_extras_config_ssl=True \ + --extra-vars galaxy_extras_config_ssl_method=letsencrypt \ + --extra-vars galaxy_extras_galaxy_domain="$GALAXY_DOMAIN" \ + $ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX \ + --tags https,$ANSIBLE_TAG_HTTPS_PROXY_PREFIX +fi +if [ "$USE_HTTPS" != "False" ] +then + if [ -f /export/server.key -a -f /export/server.crt ] + then + echo "Copying SSL keys" + ansible-playbook -c local /ansible/provision.yml \ + --extra-vars galaxy_extras_config_ssl=True \ + --extra-vars galaxy_extras_config_ssl_method=own \ + --extra-vars src_nginx_ssl_certificate_key=/export/server.key \ + --extra-vars src_nginx_ssl_certificate=/export/server.crt \ + $ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX \ + --tags https,$ANSIBLE_TAG_HTTPS_PROXY_PREFIX + else + echo "Setting up self-signed SSL keys" + ansible-playbook -c local /ansible/provision.yml \ + --extra-vars galaxy_extras_config_ssl=True \ + --extra-vars galaxy_extras_config_ssl_method=self-signed \ + $ANSIBLE_EXTRA_VARS_HTTPS_PROXY_PREFIX \ + --tags https,$ANSIBLE_TAG_HTTPS_PROXY_PREFIX + fi +fi + +if [[ "$USE_HTTPS_LETSENCRYPT" != "False" || "$USE_HTTPS" != "False" ]] +then + # Check if GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL has http but not https + if [[ $GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL == "http:"* ]] + then + GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL=${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL/http:/https:} + export GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL fi fi @@ -79,6 +125,18 @@ if [[ ! -z $DISABLE_REPORTS_AUTH ]] cp /etc/nginx/conf.d/reports_auth.conf.source /etc/nginx/conf.d/reports_auth.conf fi +# Disable authentication of flower +if [[ ! -z $DISABLE_FLOWER_AUTH ]] + then + # disable authentification + echo "Disable flower authentification " + echo "" > /etc/nginx/conf.d/flower_auth.conf + else + # enable authentification + echo "Enable flower authentification " + cp /etc/nginx/conf.d/flower_auth.conf.source /etc/nginx/conf.d/flower_auth.conf +fi + # Try to guess if we are running under --privileged mode if [[ ! -z $HOST_DOCKER_LEGACY ]]; then if mount | grep "/proc/kcore"; then @@ -98,7 +156,7 @@ else fi fi -cd $GALAXY_ROOT +cd $GALAXY_ROOT_DIR . $GALAXY_VIRTUAL_ENV/bin/activate if $PRIVILEGED; then @@ -124,16 +182,18 @@ if [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]] then echo "Installing optional dependencies in galaxy virtual environment..." 
: ${GALAXY_WHEELS_INDEX_URL:="https://wheels.galaxyproject.org/simple"} + : ${PYPI_INDEX_URL:="https://pypi.python.org/simple"} GALAXY_CONDITIONAL_DEPENDENCIES=$(PYTHONPATH=lib python -c "import galaxy.dependencies; print('\n'.join(galaxy.dependencies.optional('$GALAXY_CONFIG_FILE')))") - [ -z "$GALAXY_CONDITIONAL_DEPENDENCIES" ] || echo "$GALAXY_CONDITIONAL_DEPENDENCIES" | pip install -q -r /dev/stdin --index-url "${GALAXY_WHEELS_INDEX_URL}" + [ -z "$GALAXY_CONDITIONAL_DEPENDENCIES" ] || echo "$GALAXY_CONDITIONAL_DEPENDENCIES" | pip install -q -r /dev/stdin --index-url "${GALAXY_WHEELS_INDEX_URL}" --extra-index-url "${PYPI_INDEX_URL}" fi if [[ ! -z $LOAD_GALAXY_CONDITIONAL_DEPENDENCIES ]] && [[ ! -z $LOAD_PYTHON_DEV_DEPENDENCIES ]] then echo "Installing development requirements in galaxy virtual environment..." : ${GALAXY_WHEELS_INDEX_URL:="https://wheels.galaxyproject.org/simple"} + : ${PYPI_INDEX_URL:="https://pypi.python.org/simple"} dev_requirements='./lib/galaxy/dependencies/dev-requirements.txt' - [ -f $dev_requirements ] && pip install -q -r $dev_requirements --index-url "${GALAXY_WHEELS_INDEX_URL}" + [ -f $dev_requirements ] && pip install -q -r $dev_requirements --index-url "${GALAXY_WHEELS_INDEX_URL}" --extra-index-url "${PYPI_INDEX_URL}" fi # Enable Test Tool Shed @@ -147,7 +207,7 @@ fi if [[ ! -z $BARE ]] then echo "Remove all tools from the tool_conf.xml file." - export GALAXY_CONFIG_TOOL_CONFIG_FILE=config/shed_tool_conf.xml,$GALAXY_ROOT/test/functional/tools/upload_tool_conf.xml + export GALAXY_CONFIG_TOOL_CONFIG_FILE=$GALAXY_ROOT_DIR/test/functional/tools/upload_tool_conf.xml fi # If auto installing conda envs, make sure bcftools is installed for __set_metadata__ tool @@ -210,8 +270,8 @@ fi # Copy or link the slurm/munge config files if [ -e /export/slurm.conf ] then - rm -f /etc/slurm-llnl/slurm.conf - ln -s /export/slurm.conf /etc/slurm-llnl/slurm.conf + rm -f /etc/slurm/slurm.conf + ln -s /export/slurm.conf /etc/slurm/slurm.conf else # Configure SLURM with runtime hostname. # Use absolute path to python so virtualenv is not used. @@ -234,11 +294,26 @@ fi # Waits until postgres is ready function wait_for_postgres { echo "Checking if database is up and running" - until /usr/local/bin/check_database.py 2>&1 >/dev/null; do sleep 1; echo "Waiting for database"; done + until /usr/local/bin/check_database.py 2>&1 >/dev/null; do sleep 5; echo "Waiting for database"; done echo "Database connected" } -# $NONUSE can be set to include cron, proftp, reports or nodejs +# Waits until rabbitmq is ready +function wait_for_rabbitmq { + echo "Checking if RabbitMQ is up and running" + until rabbitmqctl status 2>&1 >/dev/null; do sleep 5; echo "Waiting for RabbitMQ"; done + echo "RabbitMQ is ready" +} + +# Waits until docker daemon is ready +function wait_for_docker { + echo "Checking if docker daemon is up and running" + until docker version 2>&1 >/dev/null; do sleep 5; echo "Waiting for docker daemon"; done + echo "Docker daemon is ready" +} + +# $NONUSE can be set to include postgres, cron, proftp, reports, nodejs, condor, slurmd, slurmctld, +# celery, rabbitmq, redis, flower or tusd # if included we will _not_ start these services. function start_supervisor { supervisord -c /etc/supervisor/supervisord.conf @@ -277,22 +352,6 @@ function start_supervisor { fi fi - if [[ ! -z $SUPERVISOR_MANAGE_REPORTS ]]; then - if [[ $NONUSE != *"reports"* ]] - then - echo "Starting Galaxy reports webapp" - supervisorctl start reports - fi - fi - - if [[ ! 
-z $SUPERVISOR_MANAGE_IE_PROXY ]]; then - if [[ $NONUSE != *"nodejs"* ]] - then - echo "Starting nodejs" - supervisorctl start galaxy:galaxy_nodejs_proxy - fi - fi - if [[ ! -z $SUPERVISOR_MANAGE_CONDOR ]]; then if [[ $NONUSE != *"condor"* ]] then @@ -328,6 +387,95 @@ function start_supervisor { # We need to run munged regardless mkdir -p /var/run/munge && /usr/sbin/munged -f fi + + if [[ ! -z $SUPERVISOR_MANAGE_RABBITMQ ]]; then + if [[ $NONUSE != *"rabbitmq"* ]] + then + echo "Starting rabbitmq" + supervisorctl start rabbitmq + + wait_for_rabbitmq + echo "Configuring rabbitmq users" + ansible-playbook -c local /usr/local/bin/configure_rabbitmq_users.yml &> /dev/null + fi + fi + + if [[ ! -z $SUPERVISOR_MANAGE_REDIS ]]; then + if [[ $NONUSE != *"redis"* ]] + then + echo "Starting redis" + supervisorctl start redis + fi + fi + + if [[ ! -z $SUPERVISOR_MANAGE_FLOWER ]]; then + if [[ $NONUSE != *"flower"* && $NONUSE != *"celery"* && $NONUSE != *"rabbitmq"* ]] + then + echo "Starting flower" + supervisorctl start flower + fi + fi +} + +function start_gravity { + if [[ ! -z $GRAVITY_MANAGE_CELERY ]]; then + if [[ $NONUSE == *"celery"* ]] + then + echo "Disabling Galaxy celery app" + python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.celery.enable" "false" &> /dev/null + python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.celery.enable_beat" "false" &> /dev/null + else + export GALAXY_CONFIG_ENABLE_CELERY_TASKS='true' + if [[ $NONUSE != *"redis"* ]] + then + # Configure Galaxy to use Redis as the result backend for Celery tasks + ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #celery_conf:' replace=' celery_conf:'" &> /dev/null + ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ # result_backend:.*' replace=' result_backend: redis://127.0.0.1:6379/0'" &> /dev/null + fi + fi + fi + + if [[ ! -z $GRAVITY_MANAGE_GX_IT_PROXY ]]; then + if [[ $NONUSE == *"nodejs"* ]] + then + echo "Disabling nodejs" + python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.gx_it_proxy.enable" "false" &> /dev/null + else + # TODO: Remove this after gravity config manager is updated to handle env vars properly + ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #interactivetools_enable:.*' replace=' interactivetools_enable: true'" &> /dev/null + fi + fi + + if [[ ! -z $GRAVITY_MANAGE_TUSD ]]; then + if [[ $NONUSE == *"tusd"* ]] + then + echo "Disabling Galaxy tusd app" + python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.tusd.enable" "false" &> /dev/null + echo "" > /etc/nginx/conf.d/delegated_uploads.conf + else + cp /etc/nginx/conf.d/delegated_uploads.conf.source /etc/nginx/conf.d/delegated_uploads.conf + + # TODO: Remove this after gravity config manager is updated to handle env vars properly + ansible localhost -m replace -a "path=${GALAXY_CONFIG_FILE} regexp='^ #galaxy_infrastructure_url:.*' replace=' galaxy_infrastructure_url: ${GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL}'" &> /dev/null + fi + fi + + if [[ ! 
-z $GRAVITY_MANAGE_REPORTS ]]; then + if [[ $NONUSE == *"reports"* ]] + then + echo "Disabling Galaxy reports webapp" + python3 /usr/local/bin/update_yaml_value "${GRAVITY_CONFIG_FILE}" "gravity.reports.enable" "false" &> /dev/null + fi + fi + + if [[ $NONUSE != *"rabbitmq"* ]] + then + # Set AMQP internal connection for Galaxy + export GALAXY_CONFIG_AMQP_INTERNAL_CONNECTION="pyamqp://galaxy:galaxy@localhost:5672/galaxy_internal" + fi + + # Start galaxy services using gravity + /usr/local/bin/galaxyctl start } if [[ ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then @@ -339,79 +487,56 @@ if [[ ! -z $SUPERVISOR_POSTGRES_AUTOSTART ]]; then fi if $PRIVILEGED; then - echo "Enable Galaxy Interactive Environments." - export GALAXY_CONFIG_INTERACTIVE_ENVIRONMENT_PLUGINS_DIRECTORY="config/plugins/interactive_environments" + # in privileged mode autofs and CVMFS is available + # install autofs + echo "Installing autofs to enable automatic CVMFS mounts" + apt-get install autofs --no-install-recommends -y + apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* + export GALAXY_CONFIG_TOOL_DATA_TABLE_CONFIG_PATH="/etc/galaxy/tool_data_table_conf.xml,/cvmfs/data.galaxyproject.org/byhand/location/tool_data_table_conf.xml,/cvmfs/data.galaxyproject.org/managed/location/tool_data_table_conf.xml" + + echo "Enable Galaxy Interactive Tools." + export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=True + export GALAXY_CONFIG_TOOL_CONFIG_FILE="$GALAXY_CONFIG_TOOL_CONFIG_FILE,$GALAXY_INTERACTIVE_TOOLS_CONFIG_FILE" + + # Update domain-based interactive tools nginx configuration with the galaxy domain if provided + if [[ ! -z $GALAXY_DOMAIN ]]; then + sed -i "s/\(\.interactivetool\.\)[^;]*/\1$GALAXY_DOMAIN/g" /etc/nginx/conf.d/interactive_tools.conf + fi + if [ x$DOCKER_PARENT == "x" ]; then #build the docker in docker environment bash /root/cgroupfs_mount.sh + start_gravity start_supervisor supervisorctl start docker + wait_for_docker else #inheriting /var/run/docker.sock from parent, assume that you need to #run docker with sudo to validate echo "$GALAXY_USER ALL = NOPASSWD : ALL" >> /etc/sudoers + start_gravity start_supervisor fi - if [[ ! -z $PULL_IE_IMAGES ]]; then - echo "About to pull IE images. Depending on the size, this may take a while!" + if [[ ! -z $PULL_IT_IMAGES ]]; then + echo "About to pull IT images. Depending on the size, this may take a while!" - for ie in {JUPYTER,RSTUDIO,ETHERCALC,PHINCH,NEO}; do - enabled_var_name="GALAXY_EXTRAS_IE_FETCH_${ie}"; + for it in {JUPYTER,RSTUDIO,ETHERCALC,PHINCH,NEO}; do + enabled_var_name="GALAXY_EXTRAS_IT_FETCH_${it}"; if [[ ${!enabled_var_name} ]]; then # Store name in a var - image_var_name="GALAXY_EXTRAS_${ie}_IMAGE" + image_var_name="GALAXY_EXTRAS_IT_${it}_IMAGE" # And then read from that var docker pull "${!image_var_name}" fi done fi - - # in privileged mode autofs and CVMFS is available - # install autofs - echo "Installing autofs to enable automatic CVMFS mounts" - apt-get install autofs --no-install-recommends -y - apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* else - echo "Disable Galaxy Interactive Environments. Start with --privileged to enable IE's." - export GALAXY_CONFIG_INTERACTIVE_ENVIRONMENT_PLUGINS_DIRECTORY="" + echo "Disable Galaxy Interactive Tools. Start with --privileged to enable ITs." 
+ export GALAXY_CONFIG_INTERACTIVETOOLS_ENABLE=False + start_gravity start_supervisor fi -if [ "$USE_HTTPS_LETSENCRYPT" != "False" ] -then - echo "Settting up letsencrypt" - ansible-playbook -c local /ansible/provision.yml \ - --extra-vars gather_facts=False \ - --extra-vars galaxy_extras_config_ssl=True \ - --extra-vars galaxy_extras_config_ssl_method=letsencrypt \ - --extra-vars galaxy_extras_galaxy_domain="GALAXY_CONFIG_GALAXY_INFRASTRUCTURE_URL" \ - --extra-vars galaxy_extras_config_nginx_upload=False \ - --tags https -fi -if [ "$USE_HTTPS" != "False" ] -then - if [ -f /export/server.key -a -f /export/server.crt ] - then - echo "Copying SSL keys" - ansible-playbook -c local /ansible/provision.yml \ - --extra-vars gather_facts=False \ - --extra-vars galaxy_extras_config_ssl=True \ - --extra-vars galaxy_extras_config_ssl_method=own \ - --extra-vars src_nginx_ssl_certificate_key=/export/server.key \ - --extra-vars src_nginx_ssl_certificate=/export/server.crt \ - --extra-vars galaxy_extras_config_nginx_upload=False \ - --tags https - else - echo "Setting up self-signed SSL keys" - ansible-playbook -c local /ansible/provision.yml \ - --extra-vars gather_facts=False \ - --extra-vars galaxy_extras_config_ssl=True \ - --extra-vars galaxy_extras_config_ssl_method=self-signed \ - --extra-vars galaxy_extras_config_nginx_upload=False \ - --tags https - fi -fi - # In case the user wants the default admin to be created, do so. if [[ ! -z $GALAXY_DEFAULT_ADMIN_USER ]] then @@ -425,7 +550,7 @@ if [[ ! -z $GALAXY_DEFAULT_ADMIN_USER ]] if [ -x /export/post-start-actions.sh ] then # uses ephemeris, present in docker-galaxy-stable, to wait for the local instance - /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 120 > $GALAXY_LOGS_DIR/post-start-actions.log && + /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 600 > $GALAXY_LOGS_DIR/post-start-actions.log && /export/post-start-actions.sh >> $GALAXY_LOGS_DIR/post-start-actions.log & fi fi @@ -433,7 +558,7 @@ fi # Reinstall tools if the user want to if [[ ! -z $GALAXY_AUTO_UPDATE_TOOLS ]] then - /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 120 > /home/galaxy/logs/post-start-actions.log && + /tool_deps/_conda/bin/galaxy-wait -g http://127.0.0.1 -v --timeout 600 > /home/galaxy/logs/post-start-actions.log && OLDIFS=$IFS IFS=',' for TOOL_YML in `echo "$GALAXY_AUTO_UPDATE_TOOLS"` @@ -445,9 +570,9 @@ if [[ ! -z $GALAXY_AUTO_UPDATE_TOOLS ]] IFS=$OLDIFS fi -# migrate custom IEs or Visualisations (Galaxy plugins) +# migrate custom Visualisations (Galaxy plugins) # this is needed for by the new client build system -python3 ${GALAXY_ROOT}/scripts/plugin_staging.py +python3 ${GALAXY_ROOT_DIR}/scripts/plugin_staging.py # Enable verbose output if [ `echo ${GALAXY_LOGGING:-'no'} | tr [:upper:] [:lower:]` = "full" ] diff --git a/galaxy/tools_conf_interactive.xml.sample b/galaxy/tools_conf_interactive.xml.sample new file mode 100644 index 000000000..dc1b8b9e4 --- /dev/null +++ b/galaxy/tools_conf_interactive.xml.sample @@ -0,0 +1,19 @@ + + +
+ + + + + + + + + + + + + + +
+
diff --git a/galaxy/welcome.html b/galaxy/welcome.html index 2906837ab..6e0a64bed 100644 --- a/galaxy/welcome.html +++ b/galaxy/welcome.html @@ -2,7 +2,7 @@ - + @@ -13,8 +13,8 @@

Hello, your Galaxy Docker container is running!

To customize this page you can create a welcome.html page in your directory mounted to /export.
-
Configuring Galaxy » - Installing Tools » + Configuring Galaxy » + Installing Tools » Guided Tour »
@@ -29,9 +29,9 @@

Hello, your Galaxy Docker container is running!

- Galaxy is an open platform for supporting data intensive - research. Galaxy is developed by The Galaxy Team - with the support of many contributors. + Galaxy is an open platform for supporting data intensive + research. Galaxy is developed by The Galaxy Team + with the support of many contributors. The Galaxy Docker project is supported by the University of Freiburg, part of de.NBI.

diff --git a/test/bioblend/Dockerfile b/test/bioblend/Dockerfile index 527d990eb..b3e9d1204 100644 --- a/test/bioblend/Dockerfile +++ b/test/bioblend/Dockerfile @@ -1,24 +1,25 @@ -FROM quay.io/bgruening/galaxy +FROM alpine:3.17 as build -USER galaxy -WORKDIR /home/galaxy - -ENV TOX_ENV=py37 \ +ENV BIOBLEND_VERSION=1.3.0 \ + TOX_ENV=py310 \ BIOBLEND_GALAXY_API_KEY=fakekey \ BIOBLEND_GALAXY_URL=http://galaxy \ BIOBLEND_TEST_JOB_TIMEOUT="240" \ - GALAXY_VERSION=release_20.05 + GALAXY_VERSION=release_24.1 + +ADD "https://github.com/galaxyproject/bioblend/archive/v$BIOBLEND_VERSION.zip" /src/bioblend.zip +RUN apk update && apk add curl python3-dev unzip \ + && python3 -m ensurepip --upgrade \ + && pip3 install pep8 tox "aiohttp==3.10.9" \ + && cd /src \ + && unzip bioblend.zip && rm bioblend.zip \ + && mv "bioblend-$BIOBLEND_VERSION" bioblend \ + && cd bioblend \ + && python3 setup.py install -RUN mkdir bioblend-master \ - && curl -L -s https://github.com/galaxyproject/bioblend/archive/master.tar.gz | tar xzf - --strip-components=1 -C bioblend-master \ - && cd bioblend-master \ - && export PATH=/tool_deps/_conda/bin/:$PATH && . activate galaxy_env \ - && pip install --upgrade "tox>=1.8.0" "pep8<=1.6.2" \ - && python setup.py install \ - && sed -i.bak "s/commands.*$/commands =/" tox.ini \ - && sed -i.bak2 "s/GALAXY_VERSION/GALAXY_VERSION BIOBLEND_TEST_JOB_TIMEOUT/" tox.ini +WORKDIR /src/bioblend -CMD /bin/bash -c "export PATH=/tool_deps/_conda/bin/:$PATH && cd /home/galaxy/bioblend-master && tox -e $TOX_ENV -- -k 'not test_download_dataset and not test_upload_from_galaxy_filesystem and not test_get_datasets and not test_datasets_from_fs and not test_tool_dependency_install and not test_download_history and not test_export_and_download'" +CMD /bin/sh -c "tox -e $TOX_ENV -- -k 'not test_upload_from_galaxy_filesystem and not test_get_datasets and not test_datasets_from_fs and not test_cancel_invocation and not test_run_step_actions'" # library tests, needs share /tmp filesystem # * test_upload_from_galaxy_filesystem diff --git a/test/bioblend/test.sh b/test/bioblend/test.sh index e48ce9c19..8c62a046b 100644 --- a/test/bioblend/test.sh +++ b/test/bioblend/test.sh @@ -1,3 +1,10 @@ #!/bin/bash -docker build -t bioblend_test . -docker run -it --link galaxy -v /tmp/:/tmp/ bioblend_test +if ! docker build -t bioblend_test .; then + echo "Bioblend docker image build failed." + exit 1 +fi + +if ! docker run --link galaxy -v /tmp/:/tmp/ bioblend_test; then + echo "Bioblend tests failed." 
+ exit 1 +fi diff --git a/test/gridengine/test_outputhostname.py b/test/gridengine/test_outputhostname.py index 159fb5626..7f26ebb5f 100644 --- a/test/gridengine/test_outputhostname.py +++ b/test/gridengine/test_outputhostname.py @@ -4,18 +4,18 @@ from bioblend.galaxy import GalaxyInstance gi = GalaxyInstance('http://galaxytest', key='admin') gi.histories.create_history() -#print gi.tools.get_tool_panel() +#print(gi.tools.get_tool_panel()) history = gi.histories.get_most_recently_used_history() -#print dir(history) +#print(dir(history)) history_id = history['id'] -#print history_id +#print(history_id) tool_output = gi.tools.run_tool( history_id=history_id, tool_id="outputhostname", tool_inputs={} ) -#print tool_output +#print(tool_output) # loop until job finish timeout is 40sec as same as slurm result="noresult" @@ -27,4 +27,4 @@ dataset= gi.datasets.show_dataset(dataset_id) result=dataset['peek'] break -print result +print(result) diff --git a/test/slurm/Dockerfile b/test/slurm/Dockerfile index bf74cf193..d5bb58aa4 100644 --- a/test/slurm/Dockerfile +++ b/test/slurm/Dockerfile @@ -1,18 +1,17 @@ -FROM ubuntu:20.04 +FROM ubuntu:22.04 ENV DEBIAN_FRONTEND noninteractive RUN apt-get update -qq && apt-get install -y --no-install-recommends \ slurmd slurmctld \ - python-psutil supervisor virtualenv samtools apt-transport-https software-properties-common curl sudo gpg-agent && \ + python3-psutil supervisor virtualenv samtools apt-transport-https software-properties-common curl sudo gpg-agent && \ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - && \ add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" && \ apt update && \ apt install -y docker-ce && \ apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && rm -rf ~/.cache/ && \ adduser galaxy &&\ - /usr/sbin/create-munge-key &&\ - touch /var/log/slurm-llnl/slurmctld.log /var/log/slurm-llnl/slurmd.log &&\ + touch /var/log/slurm/slurmctld.log /var/log/slurm/slurmd.log &&\ mkdir /tmp/slurm ADD configure_slurm.py /usr/local/bin/configure_slurm.py diff --git a/test/slurm/configure_slurm.py b/test/slurm/configure_slurm.py index 81859bed2..7586ac711 100644 --- a/test/slurm/configure_slurm.py +++ b/test/slurm/configure_slurm.py @@ -9,13 +9,10 @@ # Put this file on all nodes of your cluster. # See the slurm.conf man page for more information. 
# -ControlMachine=$control_machine -#ControlAddr= -#BackupController= -#BackupAddr= +SlurmctldHost=$control_machine +#SlurmctldAddr= # AuthType=auth/munge -CacheGroups=0 #CheckpointType=checkpoint/none CryptoType=crypto/munge MpiDefault=none @@ -54,14 +51,12 @@ #UnkillableStepTimeout=60 #VSizeFactor=0 Waittime=0 -FastSchedule=1 SchedulerType=sched/backfill -SchedulerPort=7321 SelectType=select/cons_res SelectTypeParameters=CR_Core_Memory AccountingStorageType=accounting_storage/none #AccountingStorageUser= -AccountingStoreJobComment=YES +AccountingStoreFlags=job_comment ClusterName=$cluster_name #DebugFlags= #JobCompHost= @@ -102,7 +97,7 @@ def main(): "memory": environ.get("SLURM_MEMORY", int(mem / (1024 * 1024))) } config_contents = Template(SLURM_CONFIG_TEMPLATE).substitute(template_params) - open("/etc/slurm-llnl/slurm.conf", "w").write(config_contents) + open("/etc/slurm/slurm.conf", "w").write(config_contents) if __name__ == "__main__": main() diff --git a/test/slurm/job_conf.xml b/test/slurm/job_conf.xml index 240977a7b..0344ab4cf 100644 --- a/test/slurm/job_conf.xml +++ b/test/slurm/job_conf.xml @@ -26,6 +26,9 @@ $galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$default_file_path:rw --> $defaults + bridge + True + diff --git a/test/slurm/startup.sh b/test/slurm/startup.sh index 9b6cb4439..30ce5949a 100644 --- a/test/slurm/startup.sh +++ b/test/slurm/startup.sh @@ -17,8 +17,8 @@ fi if [ ! -f "$SLURM_CONF_PATH" ] then python /usr/local/bin/configure_slurm.py - cp /etc/slurm-llnl/slurm.conf "$SLURM_CONF_PATH" - rm /etc/slurm-llnl/slurm.conf + cp /etc/slurm/slurm.conf "$SLURM_CONF_PATH" + rm /etc/slurm/slurm.conf fi if [ ! -f "$GALAXY_DIR"/.venv ] then @@ -31,5 +31,5 @@ if [ ! -f "$GALAXY_DIR"/.venv ] fi chown $SLURM_USER_NAME /tmp/slurm ln -s "$GALAXY_DIR" "$SYMLINK_TARGET" -ln -s "$SLURM_CONF_PATH" /etc/slurm-llnl/slurm.conf +ln -s "$SLURM_CONF_PATH" /etc/slurm/slurm.conf exec /usr/bin/supervisord -n -c /etc/supervisor/supervisord.conf diff --git a/test/slurm/supervisor_slurm.conf b/test/slurm/supervisor_slurm.conf index 06482a1c5..52f538e2b 100644 --- a/test/slurm/supervisor_slurm.conf +++ b/test/slurm/supervisor_slurm.conf @@ -4,14 +4,14 @@ command=/usr/sbin/munged --key-file=%(ENV_MUNGE_KEY_PATH)s -F --force [program:slurmctld] user=root -command=/usr/sbin/slurmctld -D -L /var/log/slurm-llnl/slurmctld.log -f %(ENV_SLURM_CONF_PATH)s +command=/usr/sbin/slurmctld -D -L /var/log/slurm/slurmctld.log -f %(ENV_SLURM_CONF_PATH)s autostart = %(ENV_SLURMCTLD_AUTOSTART)s autorestart = true priority = 200 [program:slurmd] user=root -command=/usr/sbin/slurmd -f %(ENV_SLURM_CONF_PATH)s -D -L /var/log/slurm-llnl/slurmd.log +command=/usr/sbin/slurmd -f %(ENV_SLURM_CONF_PATH)s -D -L /var/log/slurm/slurmd.log autostart = %(ENV_SLURMD_AUTOSTART)s autorestart = true priority = 300
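Note on the Gravity configuration plumbing used throughout the startup.sh changes above: the script repeatedly shells out to python3 /usr/local/bin/update_yaml_value "<yaml_file>" "<dotted.key.path>" "<value>" to set keys such as gravity.gunicorn.workers, gravity.celery.concurrency or gravity.tusd.enable. The helper itself is not included in this patch, so the following is only a minimal sketch of the interface those calls assume (three positional arguments, PyYAML available in the image); the script actually shipped in the container may be implemented differently.

#!/usr/bin/env python3
"""Sketch of an update_yaml_value helper: update_yaml_value <yaml_file> <dotted.key.path> <value>."""
import sys

import yaml  # PyYAML, assumed to be installed in the image


def set_nested(data, dotted_key, value):
    # Walk (and create, if missing) nested dicts along the dotted key path, then set the leaf.
    keys = dotted_key.split(".")
    node = data
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value


def main():
    yaml_file, dotted_key, raw_value = sys.argv[1], sys.argv[2], sys.argv[3]
    with open(yaml_file) as fh:
        data = yaml.safe_load(fh) or {}
    # Re-parse the value so "2" becomes an integer and "false" a boolean, matching YAML typing.
    set_nested(data, dotted_key, yaml.safe_load(raw_value))
    with open(yaml_file, "w") as fh:
        yaml.safe_dump(data, fh, default_flow_style=False)


if __name__ == "__main__":
    main()

Invoked the same way as in the patch, e.g. python3 update_yaml_value "$GRAVITY_CONFIG_FILE" gravity.gunicorn.workers "${GUNICORN_WORKERS:-2}".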