diff --git a/Gemfile.lock b/Gemfile.lock
index 1e5ea7a515..f66521c49f 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -362,7 +362,7 @@ GEM
    psych (5.1.2)
      stringio
    public_suffix (5.0.4)
-    puma (5.6.8)
+    puma (5.6.9)
      nio4r (~> 2.0)
    racc (1.8.0)
    rack (2.2.9)
@@ -520,7 +520,7 @@ GEM
      zeitwerk (~> 2.2)
    warden (1.2.9)
      rack (>= 2.0.9)
-    webrick (1.8.1)
+    webrick (1.8.2)
    websocket-driver (0.7.6)
      websocket-extensions (>= 0.1.0)
    websocket-extensions (0.1.5)
diff --git a/app/assets/images/cellarium.png b/app/assets/images/cellarium.png
new file mode 100644
index 0000000000..01c96b45e1
Binary files /dev/null and b/app/assets/images/cellarium.png differ
diff --git a/app/controllers/site_controller.rb b/app/controllers/site_controller.rb
index 230e0d9da7..84428bc6d8 100644
--- a/app/controllers/site_controller.rb
+++ b/app/controllers/site_controller.rb
@@ -61,6 +61,7 @@ def index
      @cell_count = 0
    end

+    @home_page_link = HomePageLink.published
  end

  def covid
diff --git a/app/javascript/components/explore/plot-data-cache.js b/app/javascript/components/explore/plot-data-cache.js
index 78ac277ea5..abdd92178a 100644
--- a/app/javascript/components/explore/plot-data-cache.js
+++ b/app/javascript/components/explore/plot-data-cache.js
@@ -289,7 +289,8 @@ export function createCache() {
    Fields.cellsAndCoords.merge(cacheEntry, scatter)
    // only merge in annotation values if the annotation matches (or the default was requested, so
    // we can then assume the response matches)
-    if (!requestedAnnotation.name || scatter.annotParams.name === requestedAnnotation.name) {
+    // annotParams may be undefined in spatial UX if a cluster-based annotation does not exist for the plot
+    if (!requestedAnnotation.name || scatter.annotParams?.name === requestedAnnotation.name) {
      Fields.annotation.merge(cacheEntry, scatter)
    }
    if (scatter.genes.length && scatter.genes.join('') === requestedGenes.join('')) {
diff --git a/app/javascript/components/upload/FileUploadControl.jsx b/app/javascript/components/upload/FileUploadControl.jsx
index be806d26cc..6a26dbf902 100644
--- a/app/javascript/components/upload/FileUploadControl.jsx
+++ b/app/javascript/components/upload/FileUploadControl.jsx
@@ -28,6 +28,10 @@ export default function FileUploadControl({
  const [showUploadButton, setShowUploadButton] = useState(true)
  const [showBucketPath, setShowBucketPath] = useState(false)
  const ToggleUploadButton = () => {
+    // this is an inverted check since the user is clicking and the value is about to change
+    if (!showUploadButton) {
+      unsetRemoteLocation()
+    }
    setShowUploadButton(!showUploadButton)
    setShowBucketPath(!showBucketPath)
  }
@@ -38,7 +42,7 @@
    'Upload a file from your computer' :
    "Input a path to a file that is already in this study's bucket"
  const uploadToggle = {toggleText}
@@ -51,7 +55,7 @@
  const googleBucketLink =
-    Browse bucket
@@ -77,20 +81,53 @@
        name: newName,
        notes
      })
+    } else if (issues.errors.length > 0 && file.uploadSelection) {
+      // clear out a previous known good file, if present
+      updateFile(file._id, {
+        uploadSelection: null,
+        upload_file_name: '',
+        name: ''
+      })
    }
  }

+  // keep track of pending timeout for remote validation via bucket path
+  const [timeOutId, setTimeOutID] = useState(null)
+
+  // clear out remote_location and hasRemoteFile to allow switching back to file upload button
+  function unsetRemoteLocation() {
+    updateFile(file._id, { remote_location: '', hasRemoteFile: false })
+  }
+
  // perform CSFV on remote file when specifying a GS URL or bucket path
  // will sanitize GS URL before calling validateRemoteFile
-  async function handleBucketLocationEntry(e) {
-    const path = e.target.value
+  async function handleBucketLocationEntry(path) {
    const matcher = new RegExp(`(gs:\/\/)?${bucketName}\/?`)
    const trimmedPath = path.replace(matcher, '')
    if (!trimmedPath) {
+      unsetRemoteLocation()
+      setFileValidation({ validating: false, issues: {}, fileName: null })
      return false
    }
+    // don't continue unless a dot is present (otherwise, no valid file extension)
+    if (!trimmedPath.includes('.')) { return false }
+    const fileType = file.file_type
+    const fileExtension = `.${trimmedPath.split('.').slice(-1)[0]}`
+    if (fileExtension.length > 1 && !inputAcceptExts.includes(fileExtension)) {
+      const invalidExt = {
+        errors: [
+          [
+            'error', 'filename:extension',
+            `Allowed extensions are ${allowedFileExts.join(', ')}`
+          ]
+        ]
+      }
+      setFileValidation({ validating: false, issues: invalidExt, fileName: trimmedPath })
+      return false
+    }
+
    const fileOptions = fileType === 'Metadata' ?
      { use_metadata_convention: file?.use_metadata_convention } :
      {}
    setFileValidation({ validating: true, issues: {}, fileName: trimmedPath })
@@ -175,6 +212,7 @@
      />
    }
+
    {!isFileOnServer && (showBucketPath || file.hasRemoteFile ) &&
      // we can't use TextFormField since we need a custom debounced onChange handler
      // the handler fires remote validation only after the user is done updating the input
      <input
        type="text"
        size={60}
        id={`remote_location-input-${file._id}`}
+        data-testid="remote-location-input"
        placeholder='GS URL or path to file in GCP bucket'
-        onBlur={handleBucketLocationEntry}/>
+        onChange={ (e) => {
+          const newBucketPath = e.target.value
+          if (timeOutId) { clearTimeout(timeOutId) }
+          const newTimeout = setTimeout(handleBucketLocationEntry, 300, newBucketPath)
+          setTimeOutID(newTimeout)
+        }}/>
    }
-    { !isFileOnServer && (showBucketPath || file.hasRemoteFile ) && googleBucketLink }
-    { !isFileOnServer && uploadToggle }
+    { showBucketPath && fileValidation.validating &&
+      Validating...
+    }
    { setIsAnnDataExperience(true)
      setOverrideExperienceMode(true)
-    }}> AnnData BETA
+    }}> AnnData
    Upload one AnnData (.h5ad) file
diff --git a/app/javascript/components/upload/UploadWizard.jsx b/app/javascript/components/upload/UploadWizard.jsx
index 4da9cc59ef..18b9343665 100644
--- a/app/javascript/components/upload/UploadWizard.jsx
+++ b/app/javascript/components/upload/UploadWizard.jsx
@@ -275,12 +275,17 @@ export function RawUploadWizard({ studyAccession, name }) {
    setTimeout(() => deleteFileFromServer(requestCanceller.fileId), 500)
  }

+  /** helper for determining when to use saveAnnDataFileHelper (sets ids/values correctly for AnnData UX) */
+  function useAnnDataFileHelper(file) {
+    return isAnnDataExperience && (file?.file_type === 'AnnData' || Object.keys(file).includes('data_type'))
+  }
+
  /** save the given file and perform an upload if a selected file is present */
  async function saveFile(file) {
    let fileToSave = file
    let studyFileId = file._id

-    if (isAnnDataExperience && fileToSave?.file_type === 'AnnData') {
+    if (useAnnDataFileHelper(fileToSave)) {
      fileToSave = saveAnnDataFileHelper(file, fileToSave)
      studyFileId = fileToSave._id
    }
diff --git a/app/javascript/components/upload/WizardNavPanel.jsx b/app/javascript/components/upload/WizardNavPanel.jsx
index 44123b1d28..b7e9558520 100644
--- a/app/javascript/components/upload/WizardNavPanel.jsx
+++ b/app/javascript/components/upload/WizardNavPanel.jsx
@@ -222,7 +222,7 @@ function MainStepsDisplay(formState, serverState, currentStep, setCurrentStep, m
-    AnnData BETA
+    AnnData
diff --git a/app/javascript/components/visualization/ScatterPlot.jsx b/app/javascript/components/visualization/ScatterPlot.jsx
index 0062c3949e..4cca8f7494 100644
--- a/app/javascript/components/visualization/ScatterPlot.jsx
+++ b/app/javascript/components/visualization/ScatterPlot.jsx
@@ -598,17 +598,15 @@ function RawScatterPlot({
  return (
      { ErrorComponent }
+      { hasMissingAnnot &&
+        "{cluster}" does not have the requested annotation "{loadedAnnotation}"
+      }
-      { !hasMissingAnnot &&
-
-      }
{
+      if (name.startsWith('ontologies-') && name !== currentOntologies) {
+        caches.delete(name)
+      }
+    })
+
+  const cache = await caches.open(currentOntologies)
+
+  return cache
+}
+
+/** Fetch .gz file, decompress it, return plaintext */
+export async function fetchGzipped(url) {
+  const response = await fetch(url)
+  const blob = await response.blob()
+  const uint8Array = new Uint8Array(await blob.arrayBuffer())
+  const plaintext = strFromU8(decompressSync(uint8Array))
+  return plaintext
+}
+
+/** Fetch from service worker cache if available, from remote otherwise */
+export async function cacheFetch(url) {
+  const cache = await getServiceWorkerCache()
+
+  const decompressedUrl = url.replace('.gz', '')
+  const response = await cache.match(decompressedUrl)
+  if (typeof response === 'undefined') {
+    // If cache miss, then fetch, decompress, and put response in cache
+    const data = await fetchGzipped(url)
+    const contentLength = data.length
+    const decompressedResponse = new Response(
+      data,
+      {
+        headers: new Headers({
+          'Content-Length': contentLength,
+          'Content-Type': 'text/tab-separated-values'
+        })
+      }
+    )
+    await cache.put(decompressedUrl, decompressedResponse)
+    return await cache.match(decompressedUrl)
+  }
+  return await cache.match(decompressedUrl)
+}
+
+/**
+ * Fetch minified ontologies, transform into object of object of arrays, e.g.:
+ *
+ * {
+ *   'mondo': {
+ *     'MONDO_0008315': ['prostate cancer', 'prostate neoplasm', 'prostatic neoplasm'],
+ *     'MONDO_0018076': ['tuberculosis', 'TB'],
+ *     ...
+ *   },
+ *   'ncbitaxon': {
+ *     'NCBITaxon_9606': ['Homo sapiens', 'human'],
+ *     'NCBITaxon_10090': ['Mus musculus', 'house mouse', 'mouse'],
+ *     ...
+ *   },
+ *   ...
+ * }
+ */
+export async function fetchOntologies() {
+  if (window.SCP.ontologies) {
+    // Reuse fetched, processed ontologies from this page load
+    return window.SCP.ontologies
+  }
+
+  const ontologies = {}
+
+  const ontologyNames = getOntologyShortNames()
+
+  for (let i = 0; i < ontologyNames.length; i++) {
+    const ontologyName = ontologyNames[i]
+    const ontologyUrl = `${ONTOLOGY_BASE_URL + ontologyName}.min.tsv.gz`
+    const response = await cacheFetch(ontologyUrl)
+
+    const tsv = await response.text()
+    const lines = tsv.split('\n')
+
+    ontologies[ontologyName] = {}
+
+    for (let j = 0; j < lines.length; j++) {
+      const line = lines[j]
+      const [ontologyId, label, rawSynonyms] = line.split('\t')
+      let names = [label]
+      if (rawSynonyms) {
+        const synonyms = rawSynonyms.split('||')
+        names = names.concat(synonyms)
+      }
+      ontologies[ontologyName][ontologyId] = names
+    }
+  }
+
+  window.SCP.ontologies = ontologies
+  return ontologies
+}
+
+/** Get lowercase shortnames for all required ontologies */
+function getOntologyShortNames() {
+  let requiredOntologies = []
+
+  // Collect ontologies for species, organ, disease, and library preparation protocol
+  for (let i = 0; i < REQUIRED_CONVENTION_COLUMNS.length; i++) {
+    const column = REQUIRED_CONVENTION_COLUMNS[i]
+    if (!column.endsWith('__ontology_label')) {continue}
+    const key = column.split('__ontology_label')[0]
+    const ontologies = getAcceptedOntologies(key, metadataSchema)
+    requiredOntologies = requiredOntologies.concat(ontologies)
+  }
+
+  requiredOntologies = Array.from(
+    new Set(requiredOntologies.map(o => o.toLowerCase()))
+  )
+
+  return requiredOntologies
+}
+
+/**
+ * Get list of ontology names accepted for key from metadata schema
+ *
+ * E.g. "disease" -> ["MONDO", "PATO"]
+ */
+export function getAcceptedOntologies(key, metadataSchema) {
+  // E.g. "ontology_browser_url": "https://www.ebi.ac.uk/ols/ontologies/mondo,https://www.ebi.ac.uk/ols/ontologies/pato"
+  const olsUrls = metadataSchema.properties[key].ontology
+
+  const acceptedOntologies =
+    olsUrls?.split(',').map(url => url.split('/').slice(-1)[0].toUpperCase())
+
+  if (acceptedOntologies.includes('NCBITAXON')) {
+    acceptedOntologies.push('NCBITaxon')
+  }
+
+  return acceptedOntologies
+}
diff --git a/app/javascript/lib/validation/validate-anndata.js b/app/javascript/lib/validation/validate-anndata.js
index d05fbe7bd9..7e5f4fc497 100644
--- a/app/javascript/lib/validation/validate-anndata.js
+++ b/app/javascript/lib/validation/validate-anndata.js
@@ -1,10 +1,11 @@
 import {openH5File} from 'hdf5-indexed-reader'
+import { getOAuthToken } from '~/lib/scp-api'

 import {
   validateUnique, validateRequiredMetadataColumns,
   metadataSchema, REQUIRED_CONVENTION_COLUMNS
 } from './shared-validation'
-import { getOAuthToken } from '~/lib/scp-api'
+import { getAcceptedOntologies, fetchOntologies } from './ontology-validation'

 /** Get ontology ID values for key in AnnData file */
 async function getOntologyIds(key, hdf5File) {
@@ -90,25 +91,6 @@ export async function getAnnDataHeaders(hdf5File) {
   return headers
 }

-/**
- * Get list of ontology names accepted for key from metadata schema
- *
- * E.g. "disease" -> ["MONDO", "PATO"]
- */
-function getAcceptedOntologies(key, metadataSchema) {
-  // E.g. "ontology_browser_url": "https://www.ebi.ac.uk/ols/ontologies/mondo,https://www.ebi.ac.uk/ols/ontologies/pato"
-  const olsUrls = metadataSchema.properties[key].ontology
-
-  const acceptedOntologies =
-    olsUrls?.split(',').map(url => url.split('/').slice(-1)[0].toUpperCase())
-
-  if (acceptedOntologies.includes('NCBITAXON')) {
-    acceptedOntologies.push('NCBITaxon')
-  }
-
-  return acceptedOntologies
-}
-
 /**
  * Check format of ontology IDs for key, return updated issues array
  *
@@ -138,6 +120,127 @@ export function checkOntologyIdFormat(key, ontologyIds) {
   return issues
 }

+/** Validate author's annotation labels and IDs match those in ontologies */
+async function checkOntologyLabelsAndIds(key, ontologies, groups) {
+  const [ids, idIndexes, labels, labelIndexes] = groups
+
+  const issues = []
+
+  // Determine unique (ontology ID, ontology label) pairs
+  const labelIdPairs = new Set()
+  for (let i = 0; i < idIndexes.length; i++) {
+    const id = ids[idIndexes[i]]
+    const label = labels[labelIndexes[i]]
+    labelIdPairs.add(`${id} || ${label}`)
+  }
+  const rawUniques = Array.from(labelIdPairs)
+
+  rawUniques.forEach(r => {
+    const [id, label] = r.split(' || ')
+    const ontologyShortNameLc = id.split(/[_:]/)[0].toLowerCase()
+    const ontology = ontologies[ontologyShortNameLc]
+
+    if (!(id in ontology)) {
+      // Register invalid ontology ID
+      const msg = `Invalid ontology ID: ${id}`
+      issues.push([
+        'error', 'ontology:label-lookup-error', msg,
+        { subtype: 'ontology:invalid-id' }
+      ])
+    } else {
+      const validLabels = ontology[id]
+
+      if (!(validLabels.includes(label))) {
+        // Register invalid ontology label
+        const prettyLabels = validLabels.join(', ')
+        const validLabelsClause = `Valid labels for ${id}: ${prettyLabels}`
+        const msg = `Invalid ${key} label "${label}". ${validLabelsClause}`
+        issues.push([
+          'error', 'ontology:label-lookup-error', msg,
+          { subtype: 'ontology:invalid-label' }
+        ])
+      }
+    }
+  })
+
+  return issues
+}
+
+/** Get ontology IDs and labels for key in AnnData file */
+async function getOntologyIdsAndLabels(requiredName, hdf5File) {
+  const obs = await hdf5File.get('obs')
+  const obsValues = await Promise.all(obs.values)
+
+  // Old versions of the AnnData spec used __categories as an obs.
+  // However, in new versions (since before 2023-01-23) of the AnnData spec,
+  // categorical arrays are encoded as self-contained groups containing their
+  // own `categories` and `codes`.
+  // See e.g. https://github.com/scverse/anndata/issues/879
+  const internalCategories = obsValues.find(o => o.name.endsWith('__categories'))
+  if (internalCategories) {
+    console.debug(
+      'Encountered old-spec AnnData, skipping ontology label validation. ' +
+      'Server-side processing will validate this'
+    )
+    return null
+  }
+
+  const idKey = requiredName
+  const labelKey = `${requiredName}__ontology_label`
+
+  const idGroup = obsValues.find(o => o.name.endsWith(idKey))
+  const labelGroup = obsValues.find(o => o.name.endsWith(labelKey))
+
+  // AnnData organizes each "obs" annotation (e.g. disease__ontology_label,
+  // sex) into a container with a `categories` frame and a `codes` frame.
+  //
+  // - categories: external values, non-redundant array.  E.g.:
+  //   ["tuberculosis", "TB", "foo"] or ["female"]
+  //
+  // - codes: internal values, redundant array of integers that specify
+  //   the index (position) of each category value in the array of obs
+  //   (cells)
+  //
+  // This organization greatly decreases filesize, but requires more code
+  // to map paired obs annotations like `disease` (ontology IDs) to
+  // `disease__ontology_label` (ontology names) than needed for e.g. TSVs.
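+  //
+  // A hypothetical sketch of the decoding, with illustrative values:
+  //
+  //   categories: ['MONDO_0018076', 'MONDO_0005109']
+  //   codes: [0, 0, 1]
+  //
+  // maps cell-by-cell to:
+  //
+  //   ['MONDO_0018076', 'MONDO_0018076', 'MONDO_0005109']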
+  const idCategories = await idGroup.values[0]
+  const idCodes = await idGroup.values[1]
+  const ids = await idCategories.value
+  const idIndexes = await idCodes.value
+
+  const labelCategories = await labelGroup.values[0]
+  const labelCodes = await labelGroup.values[1]
+  const labels = await labelCategories.value
+  const labelIndexes = await labelCodes.value
+
+  return [ids, idIndexes, labels, labelIndexes]
+}
+
+/** Validate ontology labels for required metadata columns in AnnData file */
+async function validateOntologyLabelsAndIds(hdf5File) {
+  let issues = []
+
+  const ontologies = await fetchOntologies()
+
+  // Validate labels and IDs for species, organ, disease, and library preparation protocol
+  for (let i = 0; i < REQUIRED_CONVENTION_COLUMNS.length; i++) {
+    const column = REQUIRED_CONVENTION_COLUMNS[i]
+    if (!column.endsWith('__ontology_label')) {continue}
+    const key = column.split('__ontology_label')[0]
+    const groups = await getOntologyIdsAndLabels(key, hdf5File)
+
+    if (groups) {
+      issues = issues.concat(
+        await checkOntologyLabelsAndIds(key, ontologies, groups)
+      )
+    }
+  }
+
+  return issues
+}
+
+
 /** Validate ontology IDs for required metadata columns in AnnData file */
 async function validateOntologyIdFormat(hdf5File) {
   let issues = []
@@ -165,21 +268,26 @@ export async function parseAnnDataFile(fileOrUrl, remoteProps) {

   const headers = await getAnnDataHeaders(hdf5File)

-  // TODO (SCP-5770): Extend AnnData CSFV to remote files, then remove this
-  if (!headers) {
-    return { issues }
-  }
   const requiredMetadataIssues = validateRequiredMetadataColumns([headers], true)
   let ontologyIdFormatIssues = []
+  let ontologyLabelAndIdIssues = []
   if (requiredMetadataIssues.length === 0) {
     ontologyIdFormatIssues = await validateOntologyIdFormat(hdf5File)
+    if (
+      ontologyIdFormatIssues.length === 0 &&
+
+      // TODO (SCP-5813): Enable ontology validation for remote AnnData
+      remoteProps && !('url' in remoteProps)
+    ) {
+      ontologyLabelAndIdIssues = await validateOntologyLabelsAndIds(hdf5File)
+    }
   }

   issues = issues.concat(
     validateUnique(headers),
     requiredMetadataIssues,
-    ontologyIdFormatIssues
+    ontologyIdFormatIssues,
+    ontologyLabelAndIdIssues
   )

   return { issues }
diff --git a/app/javascript/styles/_brand.scss b/app/javascript/styles/_brand.scss
index 69623bdad2..097928b994 100644
--- a/app/javascript/styles/_brand.scss
+++ b/app/javascript/styles/_brand.scss
@@ -252,11 +252,17 @@
  }
}

-#whitepaper-btn {
+#home-page-link {
  position: absolute;
  top: 220px;
  right: 15px;
  z-index: 500;
+
+  img {
+    height: 24px;
+    object-fit: scale-down;
+    padding-right: 5px;
+  }
}

#latest-features-btn {
diff --git a/app/javascript/styles/_global.scss b/app/javascript/styles/_global.scss
index 1616a0dc19..2bd1cded46 100644
--- a/app/javascript/styles/_global.scss
+++ b/app/javascript/styles/_global.scss
@@ -941,6 +941,16 @@ figcaption.ck-editor__editable {
  color: #C45500;
}

+.btn-home-link {
+  color: #fff;
+  box-shadow: 8px 8px 12px #2b2b2b;
+}
+
+.btn-home-link:hover {
+  color: #fff;
+  filter: brightness(90%);
+}
+
.link-darker-blue {
  color: #1b476d
}
diff --git a/app/lib/download_quota_service.rb b/app/lib/download_quota_service.rb
index 6877d28afd..60f014b955 100644
--- a/app/lib/download_quota_service.rb
+++ b/app/lib/download_quota_service.rb
@@ -19,7 +19,7 @@ def self.grant_user_exemption(user)
  def self.download_exceeds_quota?(user, requested_bytes)
    return false if user.daily_download_quota.nil?

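+    # requested_bytes may arrive as a String (e.g. when passed through from request
+    # params), and nil.to_i is 0, so coerce before doing arithmetic on it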
-    user_quota = user.daily_download_quota + requested_bytes
+    user_quota = user.daily_download_quota + requested_bytes.to_i
    user_quota > download_quota
  end
@@ -28,7 +28,7 @@ def self.increment_user_quota(user, requested_bytes)
    # skip incrementing quota if exemption is set via nil value
    return true if user.daily_download_quota.nil?

-    user_quota = user.daily_download_quota + requested_bytes
+    user_quota = user.daily_download_quota + requested_bytes.to_i
    user.update(daily_download_quota: user_quota)
  end
diff --git a/app/mailers/single_cell_mailer.rb b/app/mailers/single_cell_mailer.rb
index ddbf128657..61c8bbebcb 100644
--- a/app/mailers/single_cell_mailer.rb
+++ b/app/mailers/single_cell_mailer.rb
@@ -38,7 +38,7 @@ def notify_admin_upload_fail(study_file, error)
    @study_file = study_file
    @error = error
    @user = @study.user
-    mail(to: emails, subject: '[Single Cell Portal ERROR] FireCloud auto-upload fail in ' + @study.accession) do |format|
+    mail(to: emails, subject: "[Single Cell Portal ERROR] FireCloud auto-upload fail in #{@study.accession}") do |format|
      format.html
    end
  end
@@ -51,19 +51,19 @@ def notify_user_upload_fail(study_file, study, user)
    dev_email_config = AdminConfiguration.find_by(config_type: 'QA Dev Email')
    dev_email = dev_email_config.present? ? dev_email_config.value : nil
    title = "#{study_file.upload_file_name} did not finish uploading"
-    mail(to: user.email, bcc: dev_email, subject: '[Single Cell Portal Notifier] ' + title)
+    mail(to: user.email, bcc: dev_email, subject: "[Single Cell Portal Notifier] #{title}")
  end

  def notify_user_parse_complete(email, title, message, study)
    @message = message
    @study = study
-    mail(to: email, subject: '[Single Cell Portal Notifier] ' + title)
+    mail(to: email, subject: "[Single Cell Portal Notifier] #{title}")
  end

  def notify_user_parse_fail(email, title, error, study)
    @error = error
    @study = study
-    mail(to: email, subject: '[Single Cell Portal Notifier] ' + title)
+    mail(to: email, subject: "[Single Cell Portal Notifier] #{title}")
  end

  def notify_admin_parse_fail(user_email, title, contents)
@@ -72,7 +72,7 @@ def notify_admin_parse_fail(user_email, title, contents)
    dev_email = dev_email_config.value
    @contents = contents
    @user_email = user_email
-    mail(to: dev_email, subject: '[Single Cell Portal Admin Notification] ' + title)
+    mail(to: dev_email, subject: "[Single Cell Portal Admin Notification] #{title}")
  end
end
@@ -91,7 +91,7 @@ def notify_admin_parse_launch_fail(study, study_file, corresponding_user, ingest
    @error = error
    @action = ingest_action
    title = 'Ingest Pipeline Launch Failure'
-    mail(to: emails, subject: '[Single Cell Portal Admin Notification] ' + title)
+    mail(to: emails, subject: "[Single Cell Portal Admin Notification] #{title}")
  end

  def daily_disk_status
diff --git a/app/models/ann_data_file_info.rb b/app/models/ann_data_file_info.rb
index 3122281736..a0ce6f4d5b 100644
--- a/app/models/ann_data_file_info.rb
+++ b/app/models/ann_data_file_info.rb
@@ -39,7 +39,7 @@ class AnnDataFileInfo
  #   }
  # { _id: '6033f531e241391884633748', data_type: :expression, description: 'log(TMP) expression' }
  field :data_fragments, type: Array, default: []
-  before_validation :sanitize_fragments!
+  before_validation :set_default_cluster_fragments!, :sanitize_fragments!
  validate :validate_fragments
  after_validation :update_expression_file_info
@@ -195,6 +195,20 @@ def sanitize_fragments!
    self.data_fragments = sanitized_fragments
  end

+  # create the default cluster data_fragment entries
+  def set_default_cluster_fragments!
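+    # for example, obsm_key_name 'X_umap' yields a fragment like (id is illustrative):
+    #   { _id: '6033f531e241391884633748', data_type: :cluster, name: 'umap',
+    #     obsm_key_name: 'X_umap', spatial_cluster_associations: [] }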
+    return false if fragments_by_type(:cluster).any?
+
+    default_obsm_keys = AnnDataIngestParameters::PARAM_DEFAULTS[:obsm_keys]
+    default_obsm_keys.each do |obsm_key_name|
+      name = obsm_key_name.delete_prefix('X_')
+      fragment = {
+        _id: BSON::ObjectId.new.to_s, data_type: :cluster, name:, obsm_key_name:, spatial_cluster_associations: []
+      }
+      data_fragments << fragment
+    end
+  end
+
  # ensure all fragments have required keys and are unique
  def validate_fragments
    REQUIRED_FRAGMENT_KEYS.each do |data_type, keys|
diff --git a/app/models/ann_data_ingest_parameters.rb b/app/models/ann_data_ingest_parameters.rb
index 82729cd914..be3f26fd8b 100644
--- a/app/models/ann_data_ingest_parameters.rb
+++ b/app/models/ann_data_ingest_parameters.rb
@@ -32,7 +32,7 @@ class AnnDataIngestParameters
  PARAM_DEFAULTS = {
    ingest_anndata: true,
    anndata_file: nil,
-    obsm_keys: %w[X_umap X_tsne],
+    obsm_keys: %w[X_umap],
    ingest_cluster: false,
    cluster_file: nil,
    name: nil,
diff --git a/app/models/home_page_link.rb b/app/models/home_page_link.rb
new file mode 100644
index 0000000000..b28b3ade30
--- /dev/null
+++ b/app/models/home_page_link.rb
@@ -0,0 +1,78 @@
+class HomePageLink
+  include Mongoid::Document
+  include Mongoid::Timestamps
+
+  DEFAULT_CSS_CLASS = 'btn btn-home-link'.freeze
+  DEFAULT_BG_COLOR = '#4999F9'.freeze
+
+  field :name, type: String
+  field :href, type: String
+  field :tooltip, type: String
+  field :bg_color, type: String, default: DEFAULT_BG_COLOR
+  field :css_class, type: String, default: DEFAULT_CSS_CLASS
+  field :published, type: Mongoid::Boolean, default: false
+  field :image, type: String
+
+  validates :name, :href, presence: true
+  validate :ensure_one_published_link
+
+  def publish!
+    update(published: true)
+  end
+
+  def unpublish!
+    update(published: false)
+  end
+
+  def reset_css!
+    puts "Resetting css_class to '#{DEFAULT_CSS_CLASS}'"
+    update(css_class: DEFAULT_CSS_CLASS)
+  end
+
+  def reset_bg_color!
+    puts "Resetting bg_color to '#{DEFAULT_BG_COLOR}'"
+    update(bg_color: DEFAULT_BG_COLOR)
+  end
+
+  def self.published
+    self.find_by(published: true)
+  end
+
+  def self.publish_last!
+    if published
+      puts "'#{published.name}' is already published"
+      return false
+    end
+
+    link = last
+    if link
+      puts "Publishing '#{link.name}'"
+      link.publish!
+    else
+      puts "Nothing to publish"
+    end
+  end
+
+  def self.unpublish!
+    if published.present?
+      puts "Unpublishing '#{published.name}'"
+      published.update(published: false)
+    else
+      puts "No published links"
+      return false
+    end
+  end
+
+  private
+
+  def ensure_one_published_link
+    if published && HomePageLink.where(published: true, :id.ne => self.id).exists?
+      existing = HomePageLink.published
+      errors.add(
+        :published,
+        "link exists: '#{existing.name}', please unpublish first with HomePageLink.unpublish!"
+      )
+      puts errors.full_messages.to_sentence
+    end
+  end
+end
diff --git a/app/models/import_service_config/nemo.rb b/app/models/import_service_config/nemo.rb
index c6f5f1f829..97d4e7a4ba 100644
--- a/app/models/import_service_config/nemo.rb
+++ b/app/models/import_service_config/nemo.rb
@@ -72,7 +72,7 @@ def study_file_default_settings

  # retrieve common species names from associated collection
  def taxon_names
-    load_study&.[]('taxonomies') || []
+    load_study&.[]('taxa')&.map { |t| t['name'] } || []
  end

  # map SCP Study attribute names onto NeMO attribute names
diff --git a/app/models/ingest_job.rb b/app/models/ingest_job.rb
index 6d8869b275..16f6d15924 100644
--- a/app/models/ingest_job.rb
+++ b/app/models/ingest_job.rb
@@ -377,7 +377,8 @@ def poll_for_completion(run_at: 1.minute.from_now)
        log_error_messages
        log_to_mixpanel # log before queuing file for deletion to preserve properties
        # don't delete files or notify users if this is a 'special action', like DE or image pipeline jobs
-        handle_ingest_failure unless special_action?
+        subject = "Error: #{study_file.file_type} file: '#{study_file.upload_file_name}' parse has failed"
+        handle_ingest_failure(subject) unless special_action?
        admin_email_content = generate_error_email_body(email_type: :dev)
        SingleCellMailer.notify_admin_parse_fail(user.email, subject, admin_email_content).deliver_now
      else
@@ -390,7 +391,7 @@
  # will automatically clean up data and notify user
  # in case of subsampling, only subsampled data cleanup is run and all other data is left in place
  # this reduces churn for study owners as full-resolution data is still valid
-  def handle_ingest_failure
+  def handle_ingest_failure(email_subject)
    if action.to_sym == :ingest_subsample
      study_file.update(parse_status: 'parsed') # reset parse flag
      cluster_name = cluster_name_by_file_type
@@ -408,9 +409,8 @@
        end
      end
    end
-    subject = "Error: #{study_file.file_type} file: '#{study_file.upload_file_name}' parse has failed"
    user_email_content = generate_error_email_body
-    SingleCellMailer.notify_user_parse_fail(user.email, subject, user_email_content, study).deliver_now
+    SingleCellMailer.notify_user_parse_fail(user.email, email_subject, user_email_content, study).deliver_now
  end

  # TODO (SCP-4709, SCP-4710) Processed and Raw expression files
diff --git a/app/views/layouts/application.html.erb b/app/views/layouts/application.html.erb
index dffd0e8167..1f462e1a2f 100644
--- a/app/views/layouts/application.html.erb
+++ b/app/views/layouts/application.html.erb
@@ -192,10 +192,12 @@
          © <%= Date.today.year %> The Broad Institute of MIT and Harvard