From debe9ff6f21106e1817a123b979ff9bd722428f7 Mon Sep 17 00:00:00 2001
From: jalil
Date: Thu, 22 Aug 2024 21:05:35 +0200
Subject: [PATCH] Fix granie bug; add robustness analysis workflow

---
 src/methods/multi_omics/granie_ns/main.nf          |   2 +-
 src/robustness_analysis/add_noise_grn.py           |   2 +-
 .../robustness_analysis/config.vsh.yaml            |  28 ++++
 src/workflows/robustness_analysis/main.nf          | 129 ++++++++++++++++++
 4 files changed, 159 insertions(+), 2 deletions(-)
 create mode 100644 src/workflows/robustness_analysis/config.vsh.yaml
 create mode 100644 src/workflows/robustness_analysis/main.nf

diff --git a/src/methods/multi_omics/granie_ns/main.nf b/src/methods/multi_omics/granie_ns/main.nf
index ce360506f..552e3e5ff 100644
--- a/src/methods/multi_omics/granie_ns/main.nf
+++ b/src/methods/multi_omics/granie_ns/main.nf
@@ -8,7 +8,7 @@ workflow run_wf {
     | granie.run(
       fromState: [
         multiomics_rna_r: "multiomics_rna_r",
-        multiomics_ata_r: "multiomics_ata_r",
+        multiomics_atac_r: "multiomics_atac_r",
         num_workers: "num_workers"
       ],
       toState: [prediction:"prediction"]
diff --git a/src/robustness_analysis/add_noise_grn.py b/src/robustness_analysis/add_noise_grn.py
index ad43332bf..ae6b371b7 100644
--- a/src/robustness_analysis/add_noise_grn.py
+++ b/src/robustness_analysis/add_noise_grn.py
@@ -5,7 +5,7 @@
 layer = 'scgen_pearson'
 grn_folder = 'resources/grn_models'
 grn_folder_noised = 'resources/supplementary/grn_models_noised'
-noise_ratio = 0.2
+noise_ratio = 0.5
 # permute_ratio = 0.2
 
 # Ensure the output folder exists
diff --git a/src/workflows/robustness_analysis/config.vsh.yaml b/src/workflows/robustness_analysis/config.vsh.yaml
new file mode 100644
index 000000000..f5c6faa72
--- /dev/null
+++ b/src/workflows/robustness_analysis/config.vsh.yaml
@@ -0,0 +1,28 @@
+
+functionality:
+  name: run_robustness_analysis
+  namespace: "workflows"
+  info:
+    label: run_robustness_analysis
+    summary: "Evaluates GRNs and provides scores using regression analysis."
+
+
+  resources:
+    - type: nextflow_script
+      path: main.nf
+      entrypoint: run_wf
+    - type: file
+      path: ../../api/task_info.yaml
+  dependencies:
+    - name: common/extract_metadata
+      repository: openproblemsv2
+    - name: metrics/regression_1
+  repositories:
+    - name: openproblemsv2
+      type: github
+      repo: openproblems-bio/openproblems-v2
+      tag: main_build
+platforms:
+  - type: nextflow
+    directives:
+      label: [ midtime, midmem, lowcpu ]
diff --git a/src/workflows/robustness_analysis/main.nf b/src/workflows/robustness_analysis/main.nf
new file mode 100644
index 000000000..d3eb8ee16
--- /dev/null
+++ b/src/workflows/robustness_analysis/main.nf
@@ -0,0 +1,129 @@
+
+workflow auto {
+  findStatesTemp(params, meta.config)
+    | meta.workflow.run(
+      auto: [publish: "state"]
+    )
+}
+
+workflow run_wf {
+  take:
+  input_ch
+
+  main:
+
+  // construct list of metrics
+  metrics = [
+    regression_1
+  ]
+
+  /***************************
+   *       RUN METRICS       *
+   ***************************/
+  score_ch = input_ch
+    | map{ id, state ->
+      [id, state + ["_meta": [join_id: id]]]
+    }
+
+    | positive_control.run(
+      runIf: { id, state ->
+        state.method_id == 'positive_control'
+      },
+      fromState: [
+        perturbation_data: "perturbation_data",
+        layer: "layer",
+        tf_all: "tf_all"
+      ],
+      toState: {id, output, state ->
+        state + [
+          prediction: output.prediction
+        ]
+      }
+    )
+    | negative_control.run(
+      runIf: { id, state ->
+        state.method_id == 'negative_control'
+      },
+      fromState: [
+        perturbation_data: "perturbation_data"
+      ],
+      toState: {id, output, state ->
+        state + [
+          prediction: output.prediction
+        ]
+      }
+    )
+
+    // run all metrics
+    | runEach(
+      components: metrics,
+      id: { id, state, comp ->
+        id + "." + comp.config.functionality.name
+      },
+      // use 'fromState' to fetch the arguments the component requires from the overall state
+      fromState: [
+        perturbation_data: "perturbation_data",
+        layer: "layer",
+        prediction: "prediction",
+        subsample: "subsample",
+        reg_type: "reg_type",
+        method_id: "method_id",
+        max_workers: "max_workers",
+        consensus: "consensus"
+      ],
+      // use 'toState' to publish that component's outputs to the overall state
+      toState: { id, output, state, comp ->
+        state + [
+          metric_id: comp.config.functionality.name,
+          metric_output: output.score
+        ]
+      }
+    )
+
+  output_ch = score_ch
+
+    // extract the scores
+    | extract_metadata.run(
+      key: "extract_scores",
+      fromState: [input: "metric_output"],
+      toState: { id, output, state ->
+        state + [
+          score_uns: readYaml(output.output).uns
+        ]
+      }
+    )
+
+    | joinStates { ids, states ->
+      assert states[0]._meta, "no _meta found in state[0]"
+      // store the metric configs in a file
+      def metric_configs = metrics.collect{it.config}
+      def metric_configs_yaml_blob = toYamlBlob(metric_configs)
+      def metric_configs_file = tempFile("metric_configs.yaml")
+      metric_configs_file.write(metric_configs_yaml_blob)
+
+      def task_info_file = meta.resources_dir.resolve("task_info.yaml")
+
+      // store the scores in a file
+      def score_uns = states.collect{it.score_uns}
+      def score_uns_yaml_blob = toYamlBlob(score_uns)
+      def score_uns_file = tempFile("score_uns.yaml")
+      score_uns_file.write(score_uns_yaml_blob)
+
+      def new_state = [
+        metric_configs: metric_configs_file,
+        scores: score_uns_file,
+        _meta: states[0]._meta
+      ]
+
+      ["output", new_state]
+    }
+
+    // merge all of the output data
+    | joinStates{ ids, states ->
+      def mergedStates = states.inject([:]) { acc, m -> acc + m }
+      [ids[0], mergedStates]
+    }
+
+  emit:
+  output_ch
+}