diff --git a/.github/workflows/qiita-ci.yml b/.github/workflows/qiita-ci.yml
index 0e25ab364..69ad24694 100644
--- a/.github/workflows/qiita-ci.yml
+++ b/.github/workflows/qiita-ci.yml
@@ -104,9 +104,7 @@ jobs:
- name: Install plugins
shell: bash -l {0}
run: |
- wget https://data.qiime2.org/distro/core/qiime2-2022.11-py38-linux-conda.yml
- conda env create --quiet -n qtp-biom --file qiime2-2022.11-py38-linux-conda.yml
- rm qiime2-2022.11-py38-linux-conda.yml
+ conda env create -n qtp-biom --file https://data.qiime2.org/distro/amplicon/qiime2-amplicon-2024.5-py39-linux-conda.yml
export QIITA_ROOTCA_CERT=`pwd`/qiita_core/support_files/ci_rootca.crt
export QIITA_CONFIG_FP=`pwd`/qiita_core/support_files/config_test.cfg
export REDBIOM_HOST="http://localhost:7379"
@@ -181,7 +179,7 @@ jobs:
echo "Connecting as $USER@localhost"
# this line (and the -o StrictHostKeyChecking=no) is so the server
# is added to the list of known servers
- scp -o StrictHostKeyChecking=no -i $PWD/qiita_ware/test/test_data/test_key $USER@localhost:/home/runner/work/qiita/qiita/qiita_ware/test/test_data/random_key /home/runner/work/qiita/qiita/qiita_ware/test/test_data/random_key_copy_1
+ scp -O -o StrictHostKeyChecking=no -i $PWD/qiita_ware/test/test_data/test_key $USER@localhost:/home/runner/work/qiita/qiita/qiita_ware/test/test_data/random_key /home/runner/work/qiita/qiita/qiita_ware/test/test_data/random_key_copy_1
- name: Main tests
shell: bash -l {0}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c21fa43e7..b4ed3b113 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,72 @@
# Qiita changelog
+Version 2025.02
+---------------
+
+Deployed on February 24th, 2025
+
+* Replaced `os.rename` with `shutil.move` throughout the code to fix [#3455](https://github.com/qiita-spots/qiita/issues/3455). A minimal sketch of the difference appears after this list.
+* Via qp-spades, replaced the legacy `spades` command with `cloudSPAdes` for TellSeq processing.
+* `FASTA_preprocessed` within qtp-sequencing now allows results to be named using their sample-name in addition to their run-prefix.
+* `Remove SynDNA inserts & plasmid reads` superseded `Remove SynDNA reads`; the new command removes SynDNA inserts and plasmids.
+* `update_resource_allocation_redis` now relies on equations stored in the database rather than hardcoded ones; thank you @Gossty!
+* SPP: Updated prep-info file generation to identify and report filtered fastq files that could not be matched to a sample-id instead of silently ignoring them.
+* SPP: Removed legacy test code and example files for amplicon processing. Some other tests updated and repurposed.
+* SPP: jobs are now easier to restart.
+* SPP: MultiQC report generation is now a separate Slurm job and uses jinja2 templates; FastQC also uses jinja2 templates.
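A minimal sketch of the `os.rename` vs `shutil.move` difference behind the first entry above (illustrative only, not part of this patch; the paths are hypothetical). `os.rename` raises `OSError` when source and destination sit on different filesystems, while `shutil.move` falls back to copy-and-delete:

    import os
    import shutil

    src = '/scratch/working_dir/release.tgz'   # hypothetical source path
    dst = '/projects/qiita/release.tgz'        # hypothetical destination on another mount
    try:
        os.rename(src, dst)    # fails with OSError (EXDEV) across filesystems
    except OSError:
        shutil.move(src, dst)  # copies the file and removes the source instead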
+
+
+Version 2025.01
+---------------
+
+Deployed on January 15th, 2025
+
+* The Analysis owner is now displayed in the analysis list and the individual analysis page.
+* Admins can now use the per-preparation "Download Data Release" button to get a "BIOM" release; this version is focused on NPH data releases.
+* Improved complete_job creation time, which should result in Qiita jobs ([multiple steps](https://qiita.ucsd.edu/static/doc/html/dev/resource_allocation.html)) finishing faster; for benchmarks visit [patch 93.sql](https://github.com/qiita-spots/qiita/blob/master/qiita_db/support_files/patches/93.sql).
+* SPP improvements: TellSeq support added; plugin refactored to allow for easier additions like TellSeq in the future. Job restart greatly improved. Much improved handling of sample-names and ids that contain substrings like ‘I1’ and ‘R2’. New SequenceCount job can count sequences and base-pairs in parallel for any list of fastq files.
+* Other general fixes: [#3440](https://github.com/qiita-spots/qiita/pull/3440), [#3445](https://github.com/qiita-spots/qiita/pull/3445), [#3446](https://github.com/qiita-spots/qiita/pull/3446).
+
+
+Version 2024.10
+---------------
+
+Deployed on October 14th, 2024
+
+* Added update_resource_allocation_redis and companion code, so resource allocation summaries are available for review. Thank you @Gossty!
+* It is now possible to have default workflows with only one step.
+* `qiita_client.update_job_step` now accepts an optional `ignore_error` parameter (see the sketch after this list). Thank you @charles-cowart!
+* Initial changes in `qiita_client` to have more accurate variable names: `QIITA_SERVER_CERT` -> `QIITA_ROOTCA_CERT`. Thank you @charles-cowart!
+* Added `get_artifact_html_summary` to `qiita_client` to retrieve the summary file of an artifact.
+* Re-added github actions to `https://github.com/qiita-spots/qiita_client`.
+* `SortMeRNA v4.3.7` superseded `Sortmerna v2.1b`; the new version relies on Silva 138 and now produces even mates. Thank you @ekopylova and @biocodz for the support.
+* `Remove SynDNA reads` superseded `SynDNA Woltka`; the new command generates even mates.
+* `Woltka v0.1.7, paired-end` superseded `Woltka v0.1.6` in `qp-woltka`; [more information](https://qiita.ucsd.edu/static/doc/html/processingdata/woltka_pairedend.html). Thank you to @qiyunzhu for the benchmarks!
+* Other general fixes, like [#3424](https://github.com/qiita-spots/qiita/pull/3424), [#3425](https://github.com/qiita-spots/qiita/pull/3425), [#3439](https://github.com/qiita-spots/qiita/pull/3439), [#3440](https://github.com/qiita-spots/qiita/pull/3440).
+* General SPP improvements, like: [NuQC modified to preserve metadata in fastq files](https://github.com/biocore/mg-scripts/pull/155), [use squeue instead of sacct](https://github.com/biocore/mg-scripts/pull/152), [job aborts if Qiita study contains sample metadata columns reserved for prep-infos](https://github.com/biocore/mg-scripts/pull/151), [metapool generates OverrideCycles value](https://github.com/biocore/metagenomics_pooling_notebook/pull/225).
+* We updated the available parameters for `Filter features against reference [filter_features]`, `Non V4 16S sequence assessment [non_v4_16s]` and all the phylogenetic analytical commands so they can use `Greengenes2 2024.09`.
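An illustrative use of the new optional `ignore_error` parameter of `qiita_client.update_job_step` (a sketch, not part of this patch; the server URL, credentials and job id are placeholders):

    from qiita_client import QiitaClient

    qclient = QiitaClient('https://localhost:21174', 'client_id', 'client_secret')
    job_id = '3c9991ab-6c14-4368-a48c-841e8837a79c'  # placeholder job id
    # report progress, but do not fail the plugin if the update itself errors
    qclient.update_job_step(job_id, 'Step 2 of 3: filtering reads', ignore_error=True)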
+
+
+
+Version 2024.07
+---------------
+
+Deployed on July 15th, 2024
+
+* On June 14th, 2024 we modified the SPP to use ["fastp & minimap2 against GRCh38.p14 + Phi X 174 + T2T-CHM13v2.0, then Movi against GRCh38.p14, T2T-CHM13v2.0 + Human Pangenome Reference Consortium release 2023"](https://github.com/cguccione/human_host_filtration) to filter human-reads.
+* Full refactor of the [DB patching system](https://github.com/qiita-spots/qiita/blob/master/CONTRIBUTING.md#patch-91sql) to make sure that a new production deployment has a fully empty database.
+* Fully removed Qiimp from Qiita.
+* Users can now add `ORCID`, `ResearchGate` and/or `GoogleScholar` information to their profile and the creation (registration) timestamp is kept in the database. Thank you @jlab.
+* Admins can now track and purge non-confirmed users from the database via the GUI (`/admin/purge_users/`). Thank you @jlab.
+* Added `qiita.slurm_resource_allocations` to store general job resource usage, which can be populated by `qiita_db.util.update_resource_allocation_table`.
+* Added `qiita_db.util.resource_allocation_plot` to generate different models for allocating resources for a given software command based on previous jobs; thank you @Gossty! See the sketch after this list.
+* The stats page map can be centered via the configuration file; additionally, the Help and Admin emails are also defined via the configuration file. Thank you @jlab!
+* ``Sequel IIe``, ``Revio``, and ``Onso`` are now valid instruments for the ``PacBio_SMRT`` platform.
+* Added `current_human_filtering` to the prep-information and `human_reads_filter_method` to the artifact to keep track of the method used to filter human reads from the raw artifact and to know whether it is up to date with current best practices.
+* Added `reprocess_job_id` to the prep-information so we can keep track of whether a preparation has been reprocessed by another job.
+* Other general fixes, like [#3385](https://github.com/qiita-spots/qiita/pull/3385), [#3397](https://github.com/qiita-spots/qiita/pull/3397), [#3399](https://github.com/qiita-spots/qiita/pull/3399), [#3400](https://github.com/qiita-spots/qiita/pull/3400), [#3409](https://github.com/qiita-spots/qiita/pull/3409), [#3410](https://github.com/qiita-spots/qiita/pull/3410).
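A sketch of how the new resource-allocation helpers fit together, mirroring how `update_resource_allocation_redis` uses them later in this diff (the command, software name and version are placeholders):

    import qiita_db as qdb
    from qiita_db.meta_util import COLUMNS

    # past-job measurements for one command, fitted against samples * columns
    df = qdb.util.retrieve_resource_data('Validate', 'Target Gene type', '0.1.0', COLUMNS)
    if len(df):
        fig, axs = qdb.util.resource_allocation_plot(df, 'samples * columns')
        fig.savefig('validate_allocations.png')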
+
+
Version 2024.02
---------------
@@ -167,7 +234,7 @@ Version 2021.11
* Allow chunked download of metadata files in analyses; this allows processing large meta-analyses (like those for The Microsetta Initiative) without worker blockage.
* Added to the qp-qiime2 plugin the possibility of filtering tables based on system available "FeatureData[Sequence]"; to start we added 90/100/150 bps bloom tables.
* Now we can instantiate a study via their title (Study.from_title); this will facilitate orchestration with qebil.
-* Speed up Study listing for admins and general users; the admin study display came down from 20 to 2 seconds.
+* Speed up Study listing for admins and general users; the admin study display came down from 20 to 2 seconds.
* Fixed the following issues: [3142](https://github.com/qiita-spots/qiita/issues/3142), [3149](https://github.com/qiita-spots/qiita/issues/3149), [3150](https://github.com/qiita-spots/qiita/issues/3150), [3119](https://github.com/qiita-spots/qiita/issues/3119), and [3160](https://github.com/qiita-spots/qiita/issues/3160).
diff --git a/qiita_core/__init__.py b/qiita_core/__init__.py
index 7fca55a7c..041d7f77c 100644
--- a/qiita_core/__init__.py
+++ b/qiita_core/__init__.py
@@ -6,4 +6,4 @@
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
-__version__ = "2024.02"
+__version__ = "2025.02"
diff --git a/qiita_db/__init__.py b/qiita_db/__init__.py
index 63fbc26fc..3d4a06d0d 100644
--- a/qiita_db/__init__.py
+++ b/qiita_db/__init__.py
@@ -27,7 +27,7 @@
from . import user
from . import processing_job
-__version__ = "2024.02"
+__version__ = "2025.02"
__all__ = ["analysis", "artifact", "archive", "base", "commands",
"environment_manager", "exceptions", "investigation", "logger",
diff --git a/qiita_db/analysis.py b/qiita_db/analysis.py
index c7e44855f..dc9126691 100644
--- a/qiita_db/analysis.py
+++ b/qiita_db/analysis.py
@@ -215,6 +215,22 @@ def create(cls, owner, name, description, from_default=False,
job.submit()
return instance
+ @classmethod
+ def delete_analysis_artifacts(cls, _id):
+ """Deletes the artifacts linked to an artifact and then the analysis
+
+ Parameters
+ ----------
+ _id : int
+ The analysis id
+ """
+ analysis = cls(_id)
+ aids = [a.id for a in analysis.artifacts if not a.parents]
+ aids.sort(reverse=True)
+ for aid in aids:
+ qdb.artifact.Artifact.delete(aid)
+ cls.delete(analysis.id)
+
@classmethod
def delete(cls, _id):
"""Deletes an analysis
diff --git a/qiita_db/archive.py b/qiita_db/archive.py
index aab68f783..d411d598a 100644
--- a/qiita_db/archive.py
+++ b/qiita_db/archive.py
@@ -116,6 +116,7 @@ def get_merging_scheme_from_job(cls, job):
acmd = job.command
parent = job.input_artifacts[0]
parent_pparameters = parent.processing_parameters
+ phms = None
if parent_pparameters is None:
parent_cmd_name = None
parent_parameters = None
@@ -125,12 +126,26 @@ def get_merging_scheme_from_job(cls, job):
parent_cmd_name = pcmd.name
parent_parameters = parent_pparameters.values
parent_merging_scheme = pcmd.merging_scheme
-
- return qdb.util.human_merging_scheme(
+ if not parent_merging_scheme['ignore_parent_command']:
+ gp = parent.parents[0]
+ gp_params = gp.processing_parameters
+ if gp_params is not None:
+ gp_cmd = gp_params.command
+ phms = qdb.util.human_merging_scheme(
+ parent_cmd_name, parent_merging_scheme,
+ gp_cmd.name, gp_cmd.merging_scheme,
+ parent_parameters, [], gp_params.values)
+
+ hms = qdb.util.human_merging_scheme(
acmd.name, acmd.merging_scheme,
parent_cmd_name, parent_merging_scheme,
job.parameters.values, [], parent_parameters)
+ if phms is not None:
+ hms = qdb.util.merge_overlapping_strings(hms, phms)
+
+ return hms
+
@classmethod
def retrieve_feature_values(cls, archive_merging_scheme=None,
features=None):
diff --git a/qiita_db/artifact.py b/qiita_db/artifact.py
index 94f335c94..bf81ddf41 100644
--- a/qiita_db/artifact.py
+++ b/qiita_db/artifact.py
@@ -929,7 +929,8 @@ def can_be_submitted_to_ebi(self):
# words has more than one processing step behind it
fine_to_send = []
fine_to_send.extend([pt.artifact for pt in self.prep_templates])
- fine_to_send.extend([c for a in fine_to_send for c in a.children])
+ fine_to_send.extend([c for a in fine_to_send if a is not None
+ for c in a.children])
if self not in fine_to_send:
return False
@@ -1342,23 +1343,6 @@ def _helper(sql_edges, edges, nodes):
# If the job is in success we don't need to do anything
# else since it would've been added by the code above
if jstatus != 'success':
- # Connect the job with his input artifacts, the
- # input artifacts may or may not exist yet, so we
- # need to check both the input_artifacts and the
- # pending properties
- for in_art in n_obj.input_artifacts:
- iid = in_art.id
- if iid not in nodes and iid in extra_nodes:
- nodes[iid] = extra_nodes[iid]
- _add_edge(edges, nodes[iid], nodes[n_obj.id])
-
- pending = n_obj.pending
- for pred_id in pending:
- for pname in pending[pred_id]:
- in_node_id = '%s:%s' % (
- pred_id, pending[pred_id][pname])
- _add_edge(edges, nodes[in_node_id],
- nodes[n_obj.id])
if jstatus != 'error':
# If the job is not errored, we can add the
@@ -1380,6 +1364,34 @@ def _helper(sql_edges, edges, nodes):
queue.append(cjob.id)
if cjob.id not in nodes:
nodes[cjob.id] = ('job', cjob)
+
+ # including the outputs
+ for o_name, o_type in cjob.command.outputs:
+ node_id = '%s:%s' % (cjob.id, o_name)
+ node = TypeNode(
+ id=node_id, job_id=cjob.id,
+ name=o_name, type=o_type)
+ if node_id not in nodes:
+ nodes[node_id] = ('type', node)
+
+ # Connect the job with its input artifacts; the
+ # input artifacts may or may not exist yet, so we
+ # need to check both the input_artifacts and the
+ # pending properties
+ for in_art in n_obj.input_artifacts:
+ iid = in_art.id
+ if iid not in nodes and iid in extra_nodes:
+ nodes[iid] = extra_nodes[iid]
+ _add_edge(edges, nodes[iid], nodes[n_obj.id])
+
+ pending = n_obj.pending
+ for pred_id in pending:
+ for pname in pending[pred_id]:
+ in_node_id = '%s:%s' % (
+ pred_id, pending[pred_id][pname])
+ _add_edge(edges, nodes[in_node_id],
+ nodes[n_obj.id])
+
elif n_type == 'type':
# Connect this 'future artifact' with the job that will
# generate it
@@ -1684,3 +1696,51 @@ def get_commands(self):
cids = cmds & cids
return [qdb.software.Command(cid) for cid in cids]
+
+ @property
+ def human_reads_filter_method(self):
+ """The human_reads_filter_method of the artifact
+
+ Returns
+ -------
+ str
+ The human_reads_filter_method name
+ """
+ with qdb.sql_connection.TRN:
+ sql = """SELECT human_reads_filter_method
+ FROM qiita.artifact
+ LEFT JOIN qiita.human_reads_filter_method
+ USING (human_reads_filter_method_id)
+ WHERE artifact_id = %s"""
+ qdb.sql_connection.TRN.add(sql, [self.id])
+ return qdb.sql_connection.TRN.execute_fetchlast()
+
+ @human_reads_filter_method.setter
+ def human_reads_filter_method(self, value):
+ """Set the human_reads_filter_method of the artifact
+
+ Parameters
+ ----------
+ value : str
+ The new artifact's human_reads_filter_method
+
+ Raises
+ ------
+ ValueError
+ If `value` doesn't exist in the database
+ """
+ with qdb.sql_connection.TRN:
+ sql = """SELECT human_reads_filter_method_id
+ FROM qiita.human_reads_filter_method
+ WHERE human_reads_filter_method = %s"""
+ qdb.sql_connection.TRN.add(sql, [value])
+ idx = qdb.sql_connection.TRN.execute_fetchflatten()
+
+ if len(idx) == 0:
+ raise ValueError(
+ f'"{value}" is not a valid human_reads_filter_method')
+
+ sql = """UPDATE qiita.artifact
+ SET human_reads_filter_method_id = %s
+ WHERE artifact_id = %s"""
+ qdb.sql_connection.TRN.add(sql, [idx[0], self.id])
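A short usage sketch for the new property and setter above (illustrative, not part of this patch; the artifact id is a placeholder and the method name matches the one inserted by test_db_sql/92.sql later in this diff):

    import qiita_db as qdb

    artifact = qdb.artifact.Artifact(1)
    # the setter raises ValueError if the value is not registered in
    # qiita.human_reads_filter_method
    artifact.human_reads_filter_method = 'The greatest human filtering method'
    print(artifact.human_reads_filter_method)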
diff --git a/qiita_db/handlers/processing_job.py b/qiita_db/handlers/processing_job.py
index 6bb15cdf4..832d2407a 100644
--- a/qiita_db/handlers/processing_job.py
+++ b/qiita_db/handlers/processing_job.py
@@ -146,7 +146,9 @@ def post(self, job_id):
cmd, values_dict={'job_id': job_id,
'payload': self.request.body.decode(
'ascii')})
- job = qdb.processing_job.ProcessingJob.create(job.user, params)
+ # complete_job jobs are unique, so it is fine to force their creation
+ job = qdb.processing_job.ProcessingJob.create(
+ job.user, params, force=True)
job.submit()
self.finish()
diff --git a/qiita_db/handlers/tests/test_processing_job.py b/qiita_db/handlers/tests/test_processing_job.py
index 5ef82669a..b747b1f3e 100644
--- a/qiita_db/handlers/tests/test_processing_job.py
+++ b/qiita_db/handlers/tests/test_processing_job.py
@@ -233,9 +233,9 @@ def test_post_job_success(self):
self.assertIsNotNone(cj)
# additionally we can test that job.print_trace is correct
self.assertEqual(job.trace, [
- f'{job.id} [Not Available]: Validate | '
+ f'{job.id} [Not Available] (success): Validate | '
'-p qiita -N 1 -n 1 --mem 90gb --time 150:00:00 --nice=10000',
- f' {cj.id} [{cj.external_id}] | '
+ f' {cj.id} [{cj.external_id}] (success)| '
'-p qiita -N 1 -n 1 --mem 16gb --time 10:00:00 --nice=10000'])
def test_post_job_success_with_archive(self):
diff --git a/qiita_db/meta_util.py b/qiita_db/meta_util.py
index 79b049e37..04b5ad525 100644
--- a/qiita_db/meta_util.py
+++ b/qiita_db/meta_util.py
@@ -22,7 +22,8 @@
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
-from os import stat, rename
+from os import stat
+from shutil import move
from os.path import join, relpath, basename
from time import strftime, localtime
import matplotlib.pyplot as plt
@@ -37,11 +38,20 @@
from re import sub
from json import loads, dump, dumps
-from qiita_db.util import create_nested_path
+from qiita_db.util import create_nested_path, retrieve_resource_data
+from qiita_db.util import resource_allocation_plot
from qiita_core.qiita_settings import qiita_config, r_client
from qiita_core.configuration_manager import ConfigurationManager
import qiita_db as qdb
+# global constant list used in resource_allocation_page
+COLUMNS = [
+ "sName", "sVersion", "cID", "cName", "processing_job_id",
+ "parameters", "samples", "columns", "input_size", "extra_info",
+ "MaxRSSRaw", "ElapsedRaw", "Start", "node_name", "node_model"]
+RAW_DATA_ARTIFACT_TYPE = {
+ 'SFF', 'FASTQ', 'FASTA', 'FASTA_Sanger', 'per_sample_FASTQ'}
+
def _get_data_fpids(constructor, object_id):
"""Small function for getting filepath IDS associated with data object
@@ -111,9 +121,7 @@ def validate_filepath_access_by_user(user, filepath_id):
if artifact.visibility == 'public':
# TODO: https://github.com/biocore/qiita/issues/1724
- if artifact.artifact_type in ['SFF', 'FASTQ', 'FASTA',
- 'FASTA_Sanger',
- 'per_sample_FASTQ']:
+ if artifact.artifact_type in RAW_DATA_ARTIFACT_TYPE:
study = artifact.study
has_access = study.has_access(user, no_public=True)
if (not study.public_raw_download and not has_access):
@@ -462,7 +470,7 @@ def generate_biom_and_metadata_release(study_status='public'):
for c in iter(lambda: f.read(4096), b""):
md5sum.update(c)
- rename(tgz_name, tgz_name_final)
+ move(tgz_name, tgz_name_final)
vals = [
('filepath', tgz_name_final[len(working_dir):], r_client.set),
@@ -536,7 +544,7 @@ def generate_plugin_releases():
md5sum = md5()
for c in iter(lambda: f.read(4096), b""):
md5sum.update(c)
- rename(tgz_name, tgz_name_final)
+ move(tgz_name, tgz_name_final)
vals = [
('filepath', tgz_name_final[len(working_dir):], r_client.set),
('md5sum', md5sum.hexdigest(), r_client.set),
@@ -546,3 +554,98 @@ def generate_plugin_releases():
# important to "flush" variables to avoid errors
r_client.delete(redis_key)
f(redis_key, v)
+
+
+def get_software_commands(active):
+ software_list = [s for s in qdb.software.Software.iter(active=active)]
+ software_commands = defaultdict(lambda: defaultdict(list))
+
+ for software in software_list:
+ sname = software.name
+ sversion = software.version
+ commands = software.commands
+
+ for command in commands:
+ software_commands[sname][sversion].append(command.name)
+ software_commands[sname] = dict(software_commands[sname])
+
+ return dict(software_commands)
+
+
+def update_resource_allocation_redis(active=True):
+ """Updates redis with plots and information about current software.
+
+ Parameters
+ ----------
+ active: boolean, optional
+ Defaults to True. Should only be False when testing.
+
+ """
+ time = datetime.now().strftime('%m-%d-%y')
+ scommands = get_software_commands(active)
+ redis_key = 'resources:commands'
+ r_client.set(redis_key, str(scommands))
+
+ for sname, versions in scommands.items():
+ for version, commands in versions.items():
+ for cname in commands:
+ col_name = "samples * columns"
+ df = retrieve_resource_data(cname, sname, version, COLUMNS)
+ if len(df) == 0:
+ continue
+
+ fig, axs = resource_allocation_plot(df, col_name)
+ titles = [0, 0]
+ images = [0, 0]
+
+ # Splitting the single figure into 2 separate plots for a better layout.
+ for i, ax in enumerate(axs):
+ titles[i] = ax.get_title()
+ ax.set_title("")
+ # new_fig, new_ax: a copy with either only the memory plot
+ # or only the time plot
+ new_fig = plt.figure()
+ new_ax = new_fig.add_subplot(111)
+ line = ax.lines[0]
+ new_ax.plot(line.get_xdata(), line.get_ydata(),
+ linewidth=1, color='orange')
+ handles, labels = ax.get_legend_handles_labels()
+ for handle, label, scatter_data in zip(handles,
+ labels,
+ ax.collections):
+ color = handle.get_facecolor()
+ new_ax.scatter(scatter_data.get_offsets()[:, 0],
+ scatter_data.get_offsets()[:, 1],
+ s=scatter_data.get_sizes(), label=label,
+ color=color)
+
+ new_ax.set_xscale('log')
+ new_ax.set_yscale('log')
+ new_ax.set_xlabel(ax.get_xlabel())
+ new_ax.set_ylabel(ax.get_ylabel())
+ new_ax.legend(loc='upper left')
+
+ new_fig.tight_layout()
+ plot = BytesIO()
+ new_fig.savefig(plot, format='png')
+ plot.seek(0)
+ img = 'data:image/png;base64,' + quote(
+ b64encode(plot.getvalue()).decode('ascii'))
+ images[i] = img
+ plt.close(new_fig)
+ plt.close(fig)
+
+ # SID, CID, col_name
+ values = [
+ ("img_mem", images[0], r_client.set),
+ ("img_time", images[1], r_client.set),
+ ('time', time, r_client.set),
+ ("title_mem", titles[0], r_client.set),
+ ("title_time", titles[1], r_client.set)
+ ]
+
+ for k, v, f in values:
+ redis_key = 'resources$#%s$#%s$#%s$#%s:%s' % (
+ cname, sname, version, col_name, k)
+ r_client.delete(redis_key)
+ f(redis_key, v)
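A sketch of how the values stored by `update_resource_allocation_redis` can be read back (illustrative, not part of this patch; the command, software, version and column strings are placeholders, and the key layout mirrors the format string above):

    from qiita_core.qiita_settings import r_client

    cname, sname, version = 'Validate', 'Target Gene type', '0.1.0'
    col_name = 'samples * columns'
    key = 'resources$#%s$#%s$#%s$#%s:%s' % (cname, sname, version, col_name, 'img_mem')
    img_mem = r_client.get(key)  # base64-encoded PNG data URI for the memory plot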
diff --git a/qiita_db/metadata_template/prep_template.py b/qiita_db/metadata_template/prep_template.py
index 9899cf729..059ccb55f 100644
--- a/qiita_db/metadata_template/prep_template.py
+++ b/qiita_db/metadata_template/prep_template.py
@@ -135,7 +135,7 @@ def create(cls, md_template, study, data_type, investigation_type=None,
# data_type being created - if possible
if investigation_type is None:
if data_type_str in TARGET_GENE_DATA_TYPES:
- investigation_type = 'Amplicon'
+ investigation_type = 'AMPLICON'
elif data_type_str == 'Metagenomic':
investigation_type = 'WGS'
elif data_type_str == 'Metatranscriptomic':
@@ -272,6 +272,32 @@ def delete(cls, id_):
"Cannot remove prep template %d because it has an artifact"
" associated with it" % id_)
+ # archived artifacts are not returned by the code above, so we
+ # need to clean them up before moving forward
+ sql = """SELECT artifact_id
+ FROM qiita.preparation_artifact
+ WHERE prep_template_id = %s"""
+ qdb.sql_connection.TRN.add(sql, args)
+ archived_artifacts = set(
+ qdb.sql_connection.TRN.execute_fetchflatten())
+ ANALYSIS = qdb.analysis.Analysis
+ if archived_artifacts:
+ for aid in archived_artifacts:
+ # before we can delete the archived artifact, we need
+ # to delete the analyses where it was used.
+ sql = """SELECT analysis_id
+ FROM qiita.analysis
+ WHERE analysis_id IN (
+ SELECT DISTINCT analysis_id
+ FROM qiita.analysis_sample
+ WHERE artifact_id IN %s)"""
+ qdb.sql_connection.TRN.add(sql, [tuple([aid])])
+ analyses = set(
+ qdb.sql_connection.TRN.execute_fetchflatten())
+ for _id in analyses:
+ ANALYSIS.delete_analysis_artifacts(_id)
+ qdb.artifact.Artifact.delete(aid)
+
# Delete the prep template filepaths
sql = """DELETE FROM qiita.prep_template_filepath
WHERE prep_template_id = %s"""
@@ -782,14 +808,24 @@ def _get_node_info(workflow, node):
parent_cmd_name = None
parent_merging_scheme = None
+ phms = None
if pcmd is not None:
parent_cmd_name = pcmd.name
parent_merging_scheme = pcmd.merging_scheme
+ if not parent_merging_scheme['ignore_parent_command']:
+ phms = _get_node_info(workflow, parent)
- return qdb.util.human_merging_scheme(
+ hms = qdb.util.human_merging_scheme(
ccmd.name, ccmd.merging_scheme, parent_cmd_name,
parent_merging_scheme, cparams, [], pparams)
+ # if the parent should not ignore its parent command, then we need
+ # to merge the previous result with the new one
+ if phms is not None:
+ hms = qdb.util.merge_overlapping_strings(hms, phms)
+
+ return hms
+
def _get_predecessors(workflow, node):
# recursive method to get predecessors of a given node
pred = []
@@ -815,6 +851,9 @@ def _get_predecessors(workflow, node):
pred.append(data)
return pred
+ # this return is only reached when the node has no predecessors
+ return pred
+
# Note: we are going to use the final BIOMs to figure out which
# processing is missing from the back/end to the front, as this
# will prevent generating unnecessary steps (AKA already provided
@@ -842,7 +881,7 @@ def _get_predecessors(workflow, node):
'artifact transformation']
merging_schemes = {
qdb.archive.Archive.get_merging_scheme_from_job(j): {
- x: y.id for x, y in j.outputs.items()}
+ x: str(y.id) for x, y in j.outputs.items()}
# we are going to select only the jobs that were a 'success', that
# are not 'hidden' and that have an output - jobs that are not
# hidden and a success but that do not have outputs are jobs which
@@ -937,6 +976,8 @@ def _get_predecessors(workflow, node):
if set(merging_schemes[info]) >= set(cxns):
init_artifacts = merging_schemes[info]
break
+ if not predecessors:
+ pnode = node
if init_artifacts is None:
pdp = pnode.default_parameter
pdp_cmd = pdp.command
@@ -958,7 +999,7 @@ def _get_predecessors(workflow, node):
init_artifacts = {
wkartifact_type: f'{starting_job.id}:'}
else:
- init_artifacts = {wkartifact_type: self.artifact.id}
+ init_artifacts = {wkartifact_type: str(self.artifact.id)}
cmds_to_create.reverse()
current_job = None
diff --git a/qiita_db/metadata_template/test/test_prep_template.py b/qiita_db/metadata_template/test/test_prep_template.py
index ea41694fe..c4978f47b 100644
--- a/qiita_db/metadata_template/test/test_prep_template.py
+++ b/qiita_db/metadata_template/test/test_prep_template.py
@@ -911,7 +911,7 @@ def _common_creation_checks(self, pt, fp_count, name):
self.assertEqual(pt.data_type(), self.data_type)
self.assertEqual(pt.data_type(ret_id=True), self.data_type_id)
self.assertEqual(pt.artifact, None)
- self.assertEqual(pt.investigation_type, 'Amplicon')
+ self.assertEqual(pt.investigation_type, 'AMPLICON')
self.assertEqual(pt.study_id, self.test_study.id)
self.assertEqual(pt.status, "sandbox")
exp_sample_ids = {'%s.SKB8.640193' % self.test_study.id,
@@ -1076,7 +1076,7 @@ def test_create_warning(self):
self.assertEqual(pt.data_type(), self.data_type)
self.assertEqual(pt.data_type(ret_id=True), self.data_type_id)
self.assertEqual(pt.artifact, None)
- self.assertEqual(pt.investigation_type, 'Amplicon')
+ self.assertEqual(pt.investigation_type, 'AMPLICON')
self.assertEqual(pt.study_id, self.test_study.id)
self.assertEqual(pt.status, 'sandbox')
exp_sample_ids = {'%s.SKB8.640193' % self.test_study.id,
@@ -1247,7 +1247,7 @@ def test_investigation_type_setter(self):
"""Able to update the investigation type"""
pt = qdb.metadata_template.prep_template.PrepTemplate.create(
self.metadata, self.test_study, self.data_type_id)
- self.assertEqual(pt.investigation_type, 'Amplicon')
+ self.assertEqual(pt.investigation_type, 'AMPLICON')
pt.investigation_type = "Other"
self.assertEqual(pt.investigation_type, 'Other')
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
diff --git a/qiita_db/processing_job.py b/qiita_db/processing_job.py
index 11145925b..27192bab7 100644
--- a/qiita_db/processing_job.py
+++ b/qiita_db/processing_job.py
@@ -582,10 +582,10 @@ def create(cls, user, parameters, force=False):
TTRN = qdb.sql_connection.TRN
with TTRN:
command = parameters.command
-
- # check if a job with the same parameters already exists
- sql = """SELECT processing_job_id, email, processing_job_status,
- COUNT(aopj.artifact_id)
+ if not force:
+ # check if a job with the same parameters already exists
+ sql = """SELECT processing_job_id, email,
+ processing_job_status, COUNT(aopj.artifact_id)
FROM qiita.processing_job
LEFT JOIN qiita.processing_job_status
USING (processing_job_status_id)
@@ -596,41 +596,42 @@ def create(cls, user, parameters, force=False):
GROUP BY processing_job_id, email,
processing_job_status"""
- # we need to use ILIKE because of booleans as they can be
- # false or False
- params = []
- for k, v in parameters.values.items():
- # this is necessary in case we have an Iterable as a value
- # but that is string
- if isinstance(v, Iterable) and not isinstance(v, str):
- for vv in v:
- params.extend([k, str(vv)])
+ # we need to use ILIKE because boolean values can be
+ # stored as false or False
+ params = []
+ for k, v in parameters.values.items():
+ # this is necessary in case the value is an Iterable
+ # that is not a string
+ if isinstance(v, Iterable) and not isinstance(v, str):
+ for vv in v:
+ params.extend([k, str(vv)])
+ else:
+ params.extend([k, str(v)])
+
+ if params:
+ # divided by 2 as we have key-value pairs
+ len_params = int(len(params)/2)
+ sql = sql.format(' AND ' + ' AND '.join(
+ ["command_parameters->>%s ILIKE %s"] * len_params))
+ params = [command.id] + params
+ TTRN.add(sql, params)
else:
- params.extend([k, str(v)])
-
- if params:
- # divided by 2 as we have key-value pairs
- len_params = int(len(params)/2)
- sql = sql.format(' AND ' + ' AND '.join(
- ["command_parameters->>%s ILIKE %s"] * len_params))
- params = [command.id] + params
- TTRN.add(sql, params)
- else:
- # the sql variable expects the list of parameters but if there
- # is no param we need to replace the {0} with an empty string
- TTRN.add(sql.format(""), [command.id])
-
- # checking that if the job status is success, it has children
- # [2] status, [3] children count
- existing_jobs = [r for r in TTRN.execute_fetchindex()
- if r[2] != 'success' or r[3] > 0]
- if existing_jobs and not force:
- raise ValueError(
- 'Cannot create job because the parameters are the same as '
- 'jobs that are queued, running or already have '
- 'succeeded:\n%s' % '\n'.join(
- ["%s: %s" % (jid, status)
- for jid, _, status, _ in existing_jobs]))
+ # the sql variable expects the list of parameters but if
+ # there is no param we need to replace the {0} with an
+ # empty string
+ TTRN.add(sql.format(""), [command.id])
+
+ # checking that if the job status is success, it has children
+ # [2] status, [3] children count
+ existing_jobs = [r for r in TTRN.execute_fetchindex()
+ if r[2] != 'success' or r[3] > 0]
+ if existing_jobs:
+ raise ValueError(
+ 'Cannot create job because the parameters are the '
+ 'same as jobs that are queued, running or already '
+ 'have succeeded:\n%s' % '\n'.join(
+ ["%s: %s" % (jid, status)
+ for jid, _, status, _ in existing_jobs]))
sql = """INSERT INTO qiita.processing_job
(email, command_id, command_parameters,
@@ -2052,23 +2053,25 @@ def complete_processing_job(self):
def trace(self):
""" Returns as a text array the full trace of the job, from itself
to validators and complete jobs"""
- lines = [f'{self.id} [{self.external_id}]: '
+ lines = [f'{self.id} [{self.external_id}] ({self.status}): '
f'{self.command.name} | {self.resource_allocation_info}']
cjob = self.complete_processing_job
if cjob is not None:
- lines.append(f' {cjob.id} [{cjob.external_id}] | '
+ lines.append(f' {cjob.id} [{cjob.external_id}] ({cjob.status})| '
f'{cjob.resource_allocation_info}')
vjob = self.release_validator_job
if vjob is not None:
lines.append(f' {vjob.id} [{vjob.external_id}] '
- f'| {vjob.resource_allocation_info}')
+ f' ({vjob.status}) | '
+ f'{vjob.resource_allocation_info}')
for v in self.validator_jobs:
- lines.append(f' {v.id} [{v.external_id}]: '
+ lines.append(f' {v.id} [{v.external_id}] ({v.status}): '
f'{v.command.name} | {v.resource_allocation_info}')
cjob = v.complete_processing_job
if cjob is not None:
lines.append(f' {cjob.id} [{cjob.external_id}] '
- f'| {cjob.resource_allocation_info}')
+ f'({cjob.status}) | '
+ f'{cjob.resource_allocation_info}')
return lines
diff --git a/qiita_db/software.py b/qiita_db/software.py
index 8b27078a3..fee35a21b 100644
--- a/qiita_db/software.py
+++ b/qiita_db/software.py
@@ -1995,9 +1995,20 @@ def graph(self):
qdb.sql_connection.TRN.add(sql, [self.id])
db_edges = qdb.sql_connection.TRN.execute_fetchindex()
+ # track which nodes are connected by an edge so that nodes
+ # without any edge are still returned as part of the graph
+ used_nodes = nodes.copy()
for edge_id, p_id, c_id in db_edges:
e = DefaultWorkflowEdge(edge_id)
g.add_edge(nodes[p_id], nodes[c_id], connections=e)
+ if p_id in used_nodes:
+ del used_nodes[p_id]
+ if c_id in used_nodes:
+ del used_nodes[c_id]
+ # adding the missing nodes
+ for ms in used_nodes:
+ g.add_node(nodes[ms])
+
return g
@property
diff --git a/qiita_db/support_files/patches/91.sql b/qiita_db/support_files/patches/91.sql
new file mode 100644
index 000000000..25d2587b2
--- /dev/null
+++ b/qiita_db/support_files/patches/91.sql
@@ -0,0 +1,3 @@
+-- Just an empty SQL to allow the changes implemented in
+-- https://github.com/qiita-spots/qiita/pull/3403 to take effect
+SELECT 1;
diff --git a/qiita_db/support_files/patches/92.sql b/qiita_db/support_files/patches/92.sql
index 19d284b36..37576bca2 100644
--- a/qiita_db/support_files/patches/92.sql
+++ b/qiita_db/support_files/patches/92.sql
@@ -39,3 +39,27 @@ ALTER TABLE qiita.qiita_user
ADD creation_timestamp timestamp without time zone DEFAULT NOW();
COMMENT ON COLUMN qiita.qiita_user.creation_timestamp IS 'The date the user account was created';
+
+-- Jun 28, 2024
+-- These columns were added by mistake to qiita-db-unpatched.sql in PR
+-- https://github.com/qiita-spots/qiita/pull/3412, so we add them here instead
+
+ALTER TABLE qiita.qiita_user ADD social_orcid character varying DEFAULT NULL;
+ALTER TABLE qiita.qiita_user ADD social_researchgate character varying DEFAULT NULL;
+ALTER TABLE qiita.qiita_user ADD social_googlescholar character varying DEFAULT NULL;
+
+-- Jul 1, 2024
+-- Add human_reads_filter_method so we can keep track of the available methods
+-- and link them to the preparations
+
+CREATE TABLE qiita.human_reads_filter_method (
+ human_reads_filter_method_id SERIAL PRIMARY KEY,
+ human_reads_filter_method character varying NOT NULL
+);
+
+ALTER TABLE qiita.artifact
+ ADD human_reads_filter_method_id bigint DEFAULT NULL;
+ALTER TABLE qiita.artifact
+ ADD CONSTRAINT fk_human_reads_filter_method
+ FOREIGN KEY ( human_reads_filter_method_id )
+ REFERENCES qiita.human_reads_filter_method ( human_reads_filter_method_id );
diff --git a/qiita_db/support_files/patches/93.sql b/qiita_db/support_files/patches/93.sql
new file mode 100644
index 000000000..4befc74d6
--- /dev/null
+++ b/qiita_db/support_files/patches/93.sql
@@ -0,0 +1,64 @@
+-- Oct 18, 2024
+-- ProcessingJob.create can take up to 52 seconds when creating a complete_job, mainly
+-- due to the number of jobs for this command and the use of json. The solution in the
+-- database is to convert the column to jsonb and index its values
+
+-- ### These are the stats before the change for a single example
+-- GroupAggregate (cost=67081.81..67081.83 rows=1 width=77) (actual time=51859.962..51862.637 rows=1 loops=1)
+-- Group Key: processing_job.processing_job_id, processing_job_status.processing_job_status
+-- -> Sort (cost=67081.81..67081.81 rows=1 width=77) (actual time=51859.952..51862.627 rows=1 loops=1)
+-- Sort Key: processing_job.processing_job_id, processing_job_status.processing_job_status
+-- Sort Method: quicksort Memory: 25kB
+-- -> Nested Loop Left Join (cost=4241.74..67081.80 rows=1 width=77) (actual time=51859.926..51862.604 rows=1 loops=1)
+-- -> Nested Loop (cost=4237.30..67069.64 rows=1 width=69) (actual time=51859.889..51862.566 rows=1 loops=1)
+-- Join Filter: (processing_job.processing_job_status_id = processing_job_status.processing_job_status_id)
+-- Rows Removed by Join Filter: 1
+-- -> Gather (cost=4237.30..67068.50 rows=1 width=45) (actual time=51859.846..51862.522 rows=1 loops=1)
+-- Workers Planned: 2
+-- Workers Launched: 2
+-- -> Parallel Bitmap Heap Scan on processing_job (cost=3237.30..66068.40 rows=1 width=45) (actual time=51785.317..51785.446 rows=0 loops=3)
+-- Recheck Cond: (command_id = 83)
+-- Filter: (((command_parameters ->> 'job_id'::text) ~~* '3432a908-f7b8-4e36-89fc-88f3310b84d5'::text) AND ((command_parameters ->> '
+-- payload'::text) ~~* '{"success": true, "error": "", "artifacts": {"alpha_diversity": {"artifact_type": "alpha_vector", "filepaths": [["/qmounts/qiita_test_data/tes
+-- tlocal/working_dir/3432a908-f7b8-4e36-89fc-88f3310b84d5/alpha_phylogenetic/alpha_diversity/alpha-diversity.tsv", "plain_text"], ["/qmounts/qiita_test_data/testloca
+-- l/working_dir/3432a908-f7b8-4e36-89fc-88f3310b84d5/alpha_phylogenetic/alpha_diversity.qza", "qza"]], "archive": {}}}}'::text))
+-- Rows Removed by Filter: 97315
+-- Heap Blocks: exact=20133
+-- -> Bitmap Index Scan on idx_processing_job_command_id (cost=0.00..3237.30 rows=294517 width=0) (actual time=41.569..41.569 rows=
+-- 293054 loops=1)
+-- Index Cond: (command_id = 83)
+-- -> Seq Scan on processing_job_status (cost=0.00..1.09 rows=4 width=40) (actual time=0.035..0.035 rows=2 loops=1)
+-- Filter: ((processing_job_status)::text = ANY ('{success,waiting,running,in_construction}'::text[]))
+-- Rows Removed by Filter: 1
+-- -> Bitmap Heap Scan on artifact_output_processing_job aopj (cost=4.43..12.14 rows=2 width=24) (actual time=0.031..0.031 rows=0 loops=1)
+-- Recheck Cond: (processing_job.processing_job_id = processing_job_id)
+-- -> Bitmap Index Scan on idx_artifact_output_processing_job_job (cost=0.00..4.43 rows=2 width=0) (actual time=0.026..0.026 rows=0 loops=1)
+-- Index Cond: (processing_job_id = processing_job.processing_job_id)
+-- Planning Time: 1.173 ms
+-- Execution Time: 51862.756 ms
+
+-- Note: for this to work, an admin needs to have created the extension:
+-- CREATE EXTENSION pg_trgm;
+CREATE EXTENSION IF NOT EXISTS "pg_trgm" WITH SCHEMA public;
+
+-- This alter table will take close to 11 min
+ALTER TABLE qiita.processing_job
+ ALTER COLUMN command_parameters TYPE JSONB USING command_parameters::jsonb;
+
+-- This indexing will take like 5 min
+CREATE INDEX IF NOT EXISTS processing_job_command_parameters_job_id ON qiita.processing_job
+ USING GIN((command_parameters->>'job_id') gin_trgm_ops);
+
+-- This indexing will take like an hour
+CREATE INDEX IF NOT EXISTS processing_job_command_parameters_payload ON qiita.processing_job
+ USING GIN((command_parameters->>'payload') gin_trgm_ops);
+
+-- After the changes
+-- 18710.404 ms
+
+--
+
+-- Nov 5, 2024
+-- Adding constraints for the slurm_reservation column
+ALTER TABLE qiita.analysis DROP CONSTRAINT IF EXISTS analysis_slurm_reservation_valid_chars;
+ALTER TABLE qiita.analysis ADD CONSTRAINT analysis_slurm_reservation_valid_chars CHECK ( slurm_reservation ~ '^[a-zA-Z0-9_]*$' );
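For reference, a sketch of the kind of parameterized lookup these jsonb/pg_trgm indexes speed up, mirroring the `ProcessingJob.create` query earlier in this diff (the command id and job id are taken from the EXPLAIN output above and used here as placeholders):

    import qiita_db as qdb

    sql = """SELECT processing_job_id
             FROM qiita.processing_job
             WHERE command_id = %s
               AND command_parameters->>'job_id' ILIKE %s"""
    with qdb.sql_connection.TRN:
        qdb.sql_connection.TRN.add(
            sql, [83, '3432a908-f7b8-4e36-89fc-88f3310b84d5'])
        jobs = qdb.sql_connection.TRN.execute_fetchflatten()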
diff --git a/qiita_db/support_files/patches/94.sql b/qiita_db/support_files/patches/94.sql
new file mode 100644
index 000000000..3b565278b
--- /dev/null
+++ b/qiita_db/support_files/patches/94.sql
@@ -0,0 +1,7 @@
+-- Jan 13, 2025
+-- Adding a table for formulas for resource allocations
+CREATE TABLE qiita.allocation_equations (
+ equation_id SERIAL PRIMARY KEY,
+ equation_name TEXT NOT NULL,
+ expression TEXT NOT NULL
+ );
\ No newline at end of file
diff --git a/qiita_db/support_files/patches/test_db_sql/92.sql b/qiita_db/support_files/patches/test_db_sql/92.sql
index 65266e9f0..938b1bb2a 100644
--- a/qiita_db/support_files/patches/test_db_sql/92.sql
+++ b/qiita_db/support_files/patches/test_db_sql/92.sql
@@ -929,11 +929,26 @@ INSERT INTO qiita.slurm_resource_allocations(processing_job_id, samples, columns
-- for testing: provide creation date for one of the existing users
-UPDATE qiita.qiita_user SET creation_timestamp = '2015-12-03 13:52:42.751331-07' WHERE email = 'test@foo.bar';
+UPDATE qiita.qiita_user SET
+ social_orcid = '0000-0002-0975-9019',
+ social_researchgate = 'Rob-Knight',
+ social_googlescholar = '_e3QL94AAAAJ',
+ creation_timestamp = '2015-12-03 13:52:42.751331-07'
+WHERE email = 'test@foo.bar';
-- Jun 20, 2024
-- Add some non-verified users to the test DB to test new admin page: /admin/purge_users/
-INSERT INTO qiita.qiita_user VALUES ('justnow@nonvalidat.ed', 5, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'JustNow', 'NonVeriUser', '1634 Edgemont Avenue', '303-492-1984', NULL, NULL, NULL, false, NULL, NULL, NULL, NOW());
-INSERT INTO qiita.qiita_user VALUES ('ayearago@nonvalidat.ed', 5, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Oldie', 'NonVeriUser', '172 New Lane', '102-111-1984', NULL, NULL, NULL, false, NULL, NULL, NULL, NOW() - INTERVAL '1 YEAR');
-INSERT INTO qiita.qiita_user VALUES ('3Xdays@nonvalidat.ed', 5, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'TooLate', 'NonVeriUser', '564 C Street', '508-492-222', NULL, NULL, NULL, false, NULL, NULL, NULL, NOW() - INTERVAL '30 DAY');
+INSERT INTO qiita.qiita_user VALUES ('justnow@nonvalidat.ed', 5, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'JustNow', 'NonVeriUser', '1634 Edgemont Avenue', '303-492-1984', NULL, NULL, NULL, false, NOW(), NULL, NULL, NULL);
+INSERT INTO qiita.qiita_user VALUES ('ayearago@nonvalidat.ed', 5, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Oldie', 'NonVeriUser', '172 New Lane', '102-111-1984', NULL, NULL, NULL, false, NOW() - INTERVAL '1 YEAR', NULL, NULL, NULL);
+INSERT INTO qiita.qiita_user VALUES ('3Xdays@nonvalidat.ed', 5, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'TooLate', 'NonVeriUser', '564 C Street', '508-492-222', NULL, NULL, NULL, false, NOW() - INTERVAL '30 DAY', NULL, NULL, NULL);
+
+-- Jul 1, 2024
+-- Inserting a human_reads_filter_method and assigning it to the raw data in prep/artifact 1
+INSERT INTO qiita.human_reads_filter_method (
+ human_reads_filter_method)
+ VALUES (
+ 'The greatest human filtering method');
+UPDATE qiita.artifact
+ SET human_reads_filter_method_id = 1
+ WHERE artifact_id = 1;
diff --git a/qiita_db/support_files/patches/test_db_sql/94.sql b/qiita_db/support_files/patches/test_db_sql/94.sql
new file mode 100644
index 000000000..41ec7d8a7
--- /dev/null
+++ b/qiita_db/support_files/patches/test_db_sql/94.sql
@@ -0,0 +1,10 @@
+INSERT INTO qiita.allocation_equations(equation_name, expression)
+ VALUES
+ ('mem_model1', '(k * (np.log(x))) + (x * a) + b'),
+('mem_model2', '(k * (np.log(x))) + (b * ((np.log(x))**2)) + a'),
+('mem_model3', '(k * (np.log(x))) + (b * ((np.log(x))**2)) + (a * ((np.log(x))**3))'),
+('mem_model4', '(k * (np.log(x))) + (b * ((np.log(x))**2)) + (a * ((np.log(x))**2.5))'),
+('time_model1', 'a + b + ((np.log(x)) * k)'),
+('time_model2', 'a + (b * x) + ((np.log(x)) * k)'),
+('time_model3', 'a + (b * ((np.log(x))**2)) + ((np.log(x)) * k)'),
+('time_model4', '(a * ((np.log(x))**3)) + (b * ((np.log(x))**2)) + ((np.log(x)) * k)');
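A sketch of how one of these stored expressions could be evaluated against a job size (an assumption about the consuming code, not part of this patch; the coefficients and input size are hypothetical):

    import numpy as np

    expression = '(k * (np.log(x))) + (x * a) + b'  # mem_model1 above
    params = {'np': np, 'x': 50000, 'k': 1.5e6, 'a': 2.0, 'b': 1.0e8}
    predicted_mem = eval(expression, {'__builtins__': {}}, params)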
diff --git a/qiita_db/support_files/populate_test_db.sql b/qiita_db/support_files/populate_test_db.sql
index 1ca2edb66..52b282adc 100644
--- a/qiita_db/support_files/populate_test_db.sql
+++ b/qiita_db/support_files/populate_test_db.sql
@@ -50,10 +50,10 @@ INSERT INTO qiita.user_level VALUES (7, 'wet-lab admin', 'Can access the private
-- Data for Name: qiita_user; Type: TABLE DATA; Schema: qiita; Owner: antoniog
--
-INSERT INTO qiita.qiita_user VALUES ('test@foo.bar', 4, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Dude', 'Nowhere University', '123 fake st, Apt 0, Faketown, CO 80302', '111-222-3344', NULL, NULL, NULL, false, '0000-0002-0975-9019', 'Rob-Knight', '_e3QL94AAAAJ');
-INSERT INTO qiita.qiita_user VALUES ('shared@foo.bar', 4, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Shared', 'Nowhere University', '123 fake st, Apt 0, Faketown, CO 80302', '111-222-3344', NULL, NULL, NULL, false);
-INSERT INTO qiita.qiita_user VALUES ('admin@foo.bar', 1, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Admin', 'Owner University', '312 noname st, Apt K, Nonexistantown, CO 80302', '222-444-6789', NULL, NULL, NULL, false);
-INSERT INTO qiita.qiita_user VALUES ('demo@microbio.me', 4, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Demo', 'Qiita Dev', '1345 Colorado Avenue', '303-492-1984', NULL, NULL, NULL, false);
+INSERT INTO qiita.qiita_user VALUES ('test@foo.bar', 4, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Dude', 'Nowhere University', '123 fake st, Apt 0, Faketown, CO 80302', '111-222-3344', NULL, NULL, NULL);
+INSERT INTO qiita.qiita_user VALUES ('shared@foo.bar', 4, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Shared', 'Nowhere University', '123 fake st, Apt 0, Faketown, CO 80302', '111-222-3344', NULL, NULL, NULL);
+INSERT INTO qiita.qiita_user VALUES ('admin@foo.bar', 1, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Admin', 'Owner University', '312 noname st, Apt K, Nonexistantown, CO 80302', '222-444-6789', NULL, NULL, NULL);
+INSERT INTO qiita.qiita_user VALUES ('demo@microbio.me', 4, '$2a$12$gnUi8Qg.0tvW243v889BhOBhWLIHyIJjjgaG6dxuRJkUM8nXG9Efe', 'Demo', 'Qiita Dev', '1345 Colorado Avenue', '303-492-1984', NULL, NULL, NULL);
--
@@ -88,6 +88,7 @@ INSERT INTO qiita.artifact_type VALUES (5, 'per_sample_FASTQ', NULL, true, false
INSERT INTO qiita.artifact_type VALUES (7, 'BIOM', 'BIOM table', false, false, true);
+
--
-- Data for Name: data_type; Type: TABLE DATA; Schema: qiita; Owner: antoniog
--
@@ -329,7 +330,7 @@ INSERT INTO qiita.processing_job_status VALUES (6, 'waiting', 'The job is waitin
-- Data for Name: processing_job; Type: TABLE DATA; Schema: qiita; Owner: antoniog
--
-INSERT INTO qiita.processing_job VALUES ('6d368e16-2242-4cf8-87b4-a5dc40bb890b', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 3, NULL, NULL, NULL, NULL, false, 1284411757);
+INSERT INTO qiita.processing_job VALUES ('6d368e16-2242-4cf8-87b4-a5dc40bb890b', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":false,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 3, NULL, NULL, NULL, NULL, false, 1284411757);
INSERT INTO qiita.processing_job VALUES ('4c7115e8-4c8e-424c-bf25-96c292ca1931', 'test@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":true,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 3, NULL, NULL, NULL, NULL, false, 1287244546);
INSERT INTO qiita.processing_job VALUES ('3c9991ab-6c14-4368-a48c-841e8837a79c', 'test@foo.bar', 3, '{"reference":1,"sortmerna_e_value":1,"sortmerna_max_pos":10000,"similarity":0.97,"sortmerna_coverage":0.97,"threads":1,"input_data":2}', 3, NULL, NULL, NULL, NULL, false, 1284411377);
INSERT INTO qiita.processing_job VALUES ('b72369f9-a886-4193-8d3d-f7b504168e75', 'shared@foo.bar', 1, '{"max_bad_run_length":3,"min_per_read_length_fraction":0.75,"sequence_max_n":0,"rev_comp_barcode":false,"rev_comp_mapping_barcodes":true,"rev_comp":false,"phred_quality_threshold":3,"barcode_type":"golay_12","max_barcode_errors":1.5,"input_data":1,"phred_offset":"auto"}', 3, NULL, '2015-11-22 21:15:00', NULL, NULL, false, 128552986);
@@ -1336,7 +1337,7 @@ INSERT INTO qiita.study_users VALUES (1, 'shared@foo.bar');
INSERT INTO qiita.term VALUES (2052508974, 999999999, NULL, 'WGS', 'ENA:0000059', NULL, NULL, NULL, NULL, NULL, false);
INSERT INTO qiita.term VALUES (2052508975, 999999999, NULL, 'Metagenomics', 'ENA:0000060', NULL, NULL, NULL, NULL, NULL, false);
-INSERT INTO qiita.term VALUES (2052508976, 999999999, NULL, 'Amplicon', 'ENA:0000061', NULL, NULL, NULL, NULL, NULL, false);
+INSERT INTO qiita.term VALUES (2052508976, 999999999, NULL, 'AMPLICON', 'ENA:0000061', NULL, NULL, NULL, NULL, NULL, false);
INSERT INTO qiita.term VALUES (2052508984, 999999999, NULL, 'RNA-Seq', 'ENA:0000070', NULL, NULL, NULL, NULL, NULL, false);
INSERT INTO qiita.term VALUES (2052508987, 999999999, NULL, 'Other', 'ENA:0000069', NULL, NULL, NULL, NULL, NULL, false);
diff --git a/qiita_db/support_files/qiita-db-unpatched.sql b/qiita_db/support_files/qiita-db-unpatched.sql
index a61b4645d..1ce86de39 100644
--- a/qiita_db/support_files/qiita-db-unpatched.sql
+++ b/qiita_db/support_files/qiita-db-unpatched.sql
@@ -1888,10 +1888,7 @@ CREATE TABLE qiita.qiita_user (
user_verify_code character varying,
pass_reset_code character varying,
pass_reset_timestamp timestamp without time zone,
- receive_processing_job_emails boolean DEFAULT false,
- social_orcid character varying DEFAULT NULL,
- social_researchgate character varying DEFAULT NULL,
- social_googlescholar character varying DEFAULT NULL
+ receive_processing_job_emails boolean DEFAULT false
);
diff --git a/qiita_db/support_files/qiita-db.dbs b/qiita_db/support_files/qiita-db.dbs
index d2cb68131..1a9d03043 100644
--- a/qiita_db/support_files/qiita-db.dbs
+++ b/qiita_db/support_files/qiita-db.dbs
@@ -221,6 +221,9 @@
[XML additions to qiita-db.dbs lost; markup stripped during extraction]
@@ -248,6 +251,9 @@
[XML additions to qiita-db.dbs lost; markup stripped during extraction]
[A diff of the analysis description HTML template followed here (analysis header, sharing modal, and reservation modal). Its markup was stripped during extraction; only template text such as "{{analysis_name}} - ID {{analysis_id}}", "Mapping file", "Make analysis public", "Reservation: {% raw analysis_reservation %}", "Modify Sharing Settings", and "Modify Reservation Setting" survives.]
diff --git a/qiita_pet/templates/list_analyses.html b/qiita_pet/templates/list_analyses.html
index bcfc79207..6ef6280fc 100644
--- a/qiita_pet/templates/list_analyses.html
+++ b/qiita_pet/templates/list_analyses.html
@@ -145,6 +145,7 @@
{{name}} - ID {{prep_id}} ({{data_type}})
{% if user_level in ('admin', 'wet-lab admin') and creation_job is not None %}
- SampleSheet
+ SampleSheet
{% if creation_job_artifact_summary is not None %}
Creation Job Output
{% end %}
@@ -447,6 +447,11 @@
{% end %}
{% if editable %}
+ {% if user_level in ('admin', 'wet-lab admin') and data_type in {'Metagenomic', 'Metatranscriptomic'} %}
+
+ {% end %}
{% if deprecated %}
Remove Deprecation
{% else%}
@@ -467,6 +472,10 @@
+ {% if human_reads_filter_method is not None %}
+ The raw data of this preparation was pre-processed via: {{ human_reads_filter_method }}
+ {% end %}
+