From e46562f8949594d2d6239ff13082e8be0987f63c Mon Sep 17 00:00:00 2001
From: John Major
Date: Tue, 29 Oct 2024 17:48:39 -0700
Subject: [PATCH 1/2] 0.7.30

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 9b5531d2..556f86c2 100755
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
 
 setup(
     name="daylily",
-    version="0.7.29",
+    version="0.7.30",
     packages=find_packages(),
     install_requires=[
         # Add dependencies here,

From 9c07b61d7c71c3c03d8c21f4fb0c9fb48f10dd59 Mon Sep 17 00:00:00 2001
From: John Major
Date: Tue, 29 Oct 2024 17:58:37 -0700
Subject: [PATCH 2/2] X

---
 README.md                                |  6 +-
 .../analysis-wdls/docs/common_errors.md  | 67 -------------------
 config/CROMWELL/immuno/workflow.sh       |  1 +
 docs/quickinstall.md                     |  6 +-
 4 files changed, 11 insertions(+), 69 deletions(-)
 delete mode 100755 config/CROMWELL/immuno/analysis-wdls/docs/common_errors.md

diff --git a/README.md b/README.md
index 6a34dc61..5eb00874 100644
--- a/README.md
+++ b/README.md
@@ -622,4 +622,8 @@ _alias it for your shell:_ `alias goday=". ~/projects/daylily/bin/ssh_into_dayli
 ## AWS Cloudwatch
 - The AWS Cloudwatch console can be used to monitor the cluster, and the resources it is using. This is a good place to monitor the health of the cluster, and in particular the slurm and pcluster logs for the headnode and compute fleet.
 - Navigate to your `cloudwatch` console, then select `dashboards` and there will be a dashboard named for the name you used for the cluster. Follow this link (be sure you are in the `us-west-2` region) to see the logs and metrics for the cluster.
-- Reports are not automaticaly created for spot instances, but you may extend this base report as you like. This dashboard is automatically created by `pcluster` for each new cluster you create (and will be deleted when the cluster is deleted).
\ No newline at end of file
+- Reports are not automatically created for spot instances, but you may extend this base report as you like. This dashboard is automatically created by `pcluster` for each new cluster you create (and will be deleted when the cluster is deleted).
+
+
+# In Progress // Future Development
+- Running Cromwell WDLs is at an early stage; preliminary and still lightly documented work can be found [here](config/CROMWELL/immuno/workflow.sh) (using workflows from https://github.com/wustl-oncology as a starting point).
\ No newline at end of file
diff --git a/config/CROMWELL/immuno/analysis-wdls/docs/common_errors.md b/config/CROMWELL/immuno/analysis-wdls/docs/common_errors.md
deleted file mode 100755
index f34a60de..00000000
--- a/config/CROMWELL/immuno/analysis-wdls/docs/common_errors.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# Common Errors
-
-A few common errors that we've encountered, and potential solutions.
-
-## Out of Space
-
-These errors indicate that the disk storage space has been
-filled. That part is pretty straightforward. The part that's a bit
-more of a pitfall is that, _depending on what output_ this happened on,
-you need to change different disk sizes.
-
-If the failed write happened under the `/cromwell_root` path, then `disks:
-"local-disk ..."` needs to be increased. However, if the failed write
-happens when writing to `stdin` or `stdout`, or to any of the other standard
-Linux-y places, then you'll need to increase the value of
-`bootDiskSizeGb`. Cromwell in GCP mounts two disks, at minimum: the
-boot disk and a local-disk. The boot disk handles all the operating
-system files, but the local-disk is where almost all of your "work" is
-going to happen, besides piping between commands.
-
-## File missing
-
-This applies more to newly converted files than hardened ones, _but_
-many runs have failed because a file wasn't included in the
-instance. Generally, this happens because the CWL did not specify a
-secondaryFile that it assumed would exist next to the passed-in
-file. This works on the cluster, because the tools just look for the
-file and it already sits where it's expected. It does not work in
-the cloud, because that file is never sent to the instance. The
-solution is to add this parameter explicitly to the WDL and pass it
-through, top down.
-
-## CommandException: No URLs matched
-
-This is one of two things: either (A) the input is malformed or
-otherwise incorrect, or (B) the specified file was not uploaded to the
-bucket. These are both instances of the general version of the error,
-"No file has been uploaded to the specified URL".
-
-# Differences from CWL
-
-The last confirmed mirror with the analysis-workflows CWL repo was commit
-788bdc99c1d5b6ee7c431c3c011eb30d385c1370, PR #1063, Apr 6, 2022. Commits
-from that point on may deviate unless compared. Update these values if
-that is done.
-
-## Directory types must be a zip file, or Array[File]
-
-There is not yet a supported Directory type in WDL. Instances like
-`Directory vep_cache_dir` which involve a nested directory structure are
-replaced with `File vep_cache_dir_zip`. Instances like
-`Directory hla_call_files` which are just a flat collection of files are
-replaced with `Array[File] hla_call_files`.
-
-
-## Input files must prefix arguments with the name of the workflow
-
-Input files must prefix each argument with the name of the workflow
-they're going to run, because a WDL file can contain multiple
-workflows, or pass inputs over a layer if they aren't propagated
-through in the definition. E.g., to call workflow `somaticExome` with
-input `foo`, the YAML key must be `somaticExome.foo`.
-
-If WDLs are being used leveraging the
-[`cloud-workflows/scripts/cloudize-workflow.py` helper
-script](https://github.com/griffithlab/cloud-workflows/tree/main/scripts),
-the generated input file will have this handled already.
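For context on the "Out of Space" guidance in the doc removed above: the two sizes it names are ordinary WDL runtime attributes on Cromwell's Google backend. A minimal sketch, assuming that backend; the task, image, and sizes are illustrative and not taken from this repo:

```wdl
version 1.0

# Illustrative task showing the two disk knobs the removed doc describes.
task sort_lines {
  input {
    File infile
  }

  command <<<
    set -euo pipefail
    # Writes under /cromwell_root land on the local-disk, sized by `disks`.
    sort ~{infile} > sorted.txt
  >>>

  runtime {
    docker: "ubuntu:22.04"
    # Increase when writes under /cromwell_root run out of space.
    disks: "local-disk 100 SSD"
    # Increase when stdout/stderr or other OS-level writes fail.
    bootDiskSizeGb: 20
  }

  output {
    File sorted = "sorted.txt"
  }
}
```

Backends that do not support a given runtime attribute generally ignore it, so carrying these attributes in a WDL does not break runs on a local or Slurm backend.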
diff --git a/config/CROMWELL/immuno/workflow.sh b/config/CROMWELL/immuno/workflow.sh
index a60fca2f..466a8dde 100755
--- a/config/CROMWELL/immuno/workflow.sh
+++ b/config/CROMWELL/immuno/workflow.sh
@@ -1,5 +1,6 @@
 #!/bin/bash
+
 
 # using the WDLs from https://github.com/wustl-oncology
 
 cp config/MGI_example/procs.txt .
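The removed doc's "Directory types must be a zip file" rule is the conversion most likely to come up when adapting the wustl-oncology workflows referenced in `workflow.sh`. A sketch of the pattern, assuming WDL 1.0 under Cromwell; the task name, image, and command body are hypothetical, while `vep_cache_dir_zip` is the doc's own example:

```wdl
version 1.0

# Hypothetical task showing the zip-instead-of-Directory conversion.
task annotate_variants {
  input {
    File vcf
    # WDL 1.0 has no Directory type, so a nested cache directory
    # travels as one zip file and is unpacked inside the task.
    File vep_cache_dir_zip
  }

  command <<<
    set -euo pipefail
    mkdir -p vep_cache
    # python3 -m zipfile ships with the python base image.
    python3 -m zipfile -e ~{vep_cache_dir_zip} vep_cache/
    # A real task would run VEP against vep_cache here.
    wc -l ~{vcf} > annotate.log
  >>>

  runtime {
    docker: "python:3.12-slim"
    disks: "local-disk 50 SSD"
  }

  output {
    File log = "annotate.log"
  }
}
```

A flat collection of files goes the other way, per the same doc: declare `Array[File] hla_call_files` and let Cromwell localize each member.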
diff --git a/docs/quickinstall.md b/docs/quickinstall.md
index 6a34dc61..5eb00874 100644
--- a/docs/quickinstall.md
+++ b/docs/quickinstall.md
@@ -622,4 +622,8 @@ _alias it for your shell:_ `alias goday=". ~/projects/daylily/bin/ssh_into_dayli
 ## AWS Cloudwatch
 - The AWS Cloudwatch console can be used to monitor the cluster, and the resources it is using. This is a good place to monitor the health of the cluster, and in particular the slurm and pcluster logs for the headnode and compute fleet.
 - Navigate to your `cloudwatch` console, then select `dashboards` and there will be a dashboard named for the name you used for the cluster. Follow this link (be sure you are in the `us-west-2` region) to see the logs and metrics for the cluster.
-- Reports are not automaticaly created for spot instances, but you may extend this base report as you like. This dashboard is automatically created by `pcluster` for each new cluster you create (and will be deleted when the cluster is deleted).
\ No newline at end of file
+- Reports are not automatically created for spot instances, but you may extend this base report as you like. This dashboard is automatically created by `pcluster` for each new cluster you create (and will be deleted when the cluster is deleted).
+
+
+# In Progress // Future Development
+- Running Cromwell WDLs is at an early stage; preliminary and still lightly documented work can be found [here](config/CROMWELL/immuno/workflow.sh) (using workflows from https://github.com/wustl-oncology as a starting point).
\ No newline at end of file
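One convention from the removed doc worth keeping in mind when exercising the in-progress Cromwell work flagged above: every key in an inputs file is prefixed with the name of the workflow being run. A minimal sketch; the workflow and input names come from the doc's own example, and the path in the comment is hypothetical:

```wdl
version 1.0

# The matching inputs YAML uses keys prefixed with the workflow name:
#
#   somaticExome.foo: "/data/example/foo.bam"   # hypothetical path
#
workflow somaticExome {
  input {
    File foo
  }
}
```

If inputs are generated with the `cloudize-workflow.py` helper script mentioned in the removed doc, this prefixing is handled already.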