diff --git a/doajtest/fixtures/article.py b/doajtest/fixtures/article.py index 3450cb6044..7c8236393e 100644 --- a/doajtest/fixtures/article.py +++ b/doajtest/fixtures/article.py @@ -199,7 +199,7 @@ def make_article_with_data(title=None, publisher_name=None, abstract=None, count "language": ["EN", "FR"], "country": "US" }, - "year": "1991", + "year": "2015", "month": "January", "start_page": "3", "end_page": "21", diff --git a/doajtest/testbook/article_xml_upload/article_doaj_xml_upload.yml b/doajtest/testbook/article_xml_upload/article_doaj_xml_upload.yml index 37852ae306..00f2170175 100644 --- a/doajtest/testbook/article_xml_upload/article_doaj_xml_upload.yml +++ b/doajtest/testbook/article_xml_upload/article_doaj_xml_upload.yml @@ -344,8 +344,6 @@ tests: - status is "complete" - Outcome Status is "fail" - - - title: Check Outcome Status of "Successfully upload a file containing a new article" context: role: admin @@ -357,3 +355,30 @@ tests: - Related background job is found - status is "complete" - Outcome Status is "success" + +- title: Upload a file before OA start date of the Journal + context: + role: publisher + steps: + - step: Ensure that the publisher account owns a journal with Print ISSN "1111-1111" + and E-ISSN "2222-2222", and these are not attached to any other journals and + that this journal does not contain "Success" or "Update" articles from previous + tests. 
You can use the testdrive endpoint "publisher_with_journal" to create this + path: /testdrive/publisher_with_journal + - step: Update the Journal (if needed) to ensure the OA start date is as late as possible (the current year is the latest you + can set it to) + - step: Go to the "Upload Article XML" tab in the "Publisher Area" + - step: Select "Choose file" and select the test resource file "successful.xml" + resource: /xml_upload_test_package/DOAJ/successful.xml + - step: Click "Upload" + results: + - 'A flash message appears at the top of the screen indicating a successful upload: + File uploaded and waiting to be processed. Check back here for updates.(Dismiss)' + - Your file is shown in the "History of uploads" with status "pending" + - step: wait a short amount of time for the job to process, then reload the page + (do not re-submit the form data). If the job remains in "pending", reload the + page until the status changes. + results: + - Your file is shown in the "History of uploads" with status "processing failed" + and an entry in the "Notes" which reads as 'One or more articles cannot be uploaded, as they have a publication date before the 'OA start date' of the Journal'. Check that the explanation link goes to + a suitable reason and resolution for the problem. 
\ No newline at end of file diff --git a/doajtest/unit/resources/articles_metadata_form.py b/doajtest/unit/resources/articles_metadata_form.py index ade28efc83..4f8a8ffa0c 100644 --- a/doajtest/unit/resources/articles_metadata_form.py +++ b/doajtest/unit/resources/articles_metadata_form.py @@ -54,7 +54,7 @@ def update_article_doi(self, valid): 'abstract': 'This abstract has been edited', 'keywords': 'edited-1,edited-2, edited-3', 'publication_month': '10', - 'publication_year': '1987', + 'publication_year': '2013', #'pissn': '1234-5678', #'eissn': '9876-5432', 'volume': '1', diff --git a/doajtest/unit/resources/harvester_resp.json b/doajtest/unit/resources/harvester_resp.json index 133fedaf24..78094b1ab1 100644 --- a/doajtest/unit/resources/harvester_resp.json +++ b/doajtest/unit/resources/harvester_resp.json @@ -40,7 +40,7 @@ "journalIssueId": 111111, "dateOfPublication": "2010 Apr", "monthOfPublication": 1, - "yearOfPublication": 2010, + "yearOfPublication": 2013, "printPublicationDate": "2010-01-01", "journal": { "title": "My Journal", @@ -51,7 +51,7 @@ "nlmid": "123456789" } }, - "pubYear": "2010", + "pubYear": "2013", "pageInfo": "1", "affiliation": "aga@cottagelabs.com", "publicationStatus": "publish", @@ -136,9 +136,9 @@ "issue": "1", "volume": "1", "journalIssueId": 111111, - "dateOfPublication": "2010 Apr", + "dateOfPublication": "2013 Apr", "monthOfPublication": 1, - "yearOfPublication": 2010, + "yearOfPublication": 2013, "printPublicationDate": "2010-01-01", "journal": { "title": "My Journal", @@ -149,7 +149,7 @@ "nlmid": "123456789" } }, - "pubYear": "2010", + "pubYear": "2013", "pageInfo": "1", "affiliation": "aga@cottagelabs.com", "publicationStatus": "publish", diff --git a/doajtest/unit/test_crosswalks_article_ris.py b/doajtest/unit/test_crosswalks_article_ris.py index 760d819e1c..22179d6963 100644 --- a/doajtest/unit/test_crosswalks_article_ris.py +++ b/doajtest/unit/test_crosswalks_article_ris.py @@ -17,7 +17,7 @@ def test_article2ris(self): TY - 
JOUR T1 - Article Title AU - The Author -PY - 1991 +PY - 2015 JF - The Title PB - The Publisher VL - 1 diff --git a/doajtest/unit/test_tasks_ingestDOAJarticles.py b/doajtest/unit/test_tasks_ingestDOAJarticles.py index 4421d97ef9..24759f3b3c 100644 --- a/doajtest/unit/test_tasks_ingestDOAJarticles.py +++ b/doajtest/unit/test_tasks_ingestDOAJarticles.py @@ -23,7 +23,7 @@ from portality.crosswalks import article_doaj_xml from portality.tasks import ingestarticles from portality.ui.messages import Messages - +from portality.lib import dates class TestIngestArticlesDoajXML(DoajTestCase): @@ -1048,3 +1048,76 @@ def test_61_journal_not_indoaj(self): assert file_upload.status == "failed" assert file_upload.error == Messages.EXCEPTION_ADDING_ARTICLE_TO_WITHDRAWN_JOURNAL + + def test_62_article_before_oa_start(self): + journal = article_upload_tester.create_simple_journal("testowner", pissn="1234-5678", eissn="9876-5432") + journal.bibjson().oa_start = dates.now().year + helpers.save_all_block_last([ journal, + article_upload_tester.create_simple_publisher("testowner") + ]) + + # make both handles, as we want as little gap as possible between requests in a moment + handle1 = DoajXmlArticleFixtureFactory.upload_2_issns_correct() + + f1 = FileMockFactory(stream=handle1) + + job1 = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f1) + id1 = job1.params.get("ingest_articles__file_upload_id") + self.cleanup_ids.append(id1) + + # because file upload gets created and saved by prepare + time.sleep(1) + + task1 = ingestarticles.IngestArticlesBackgroundTask(job1) + + task1.run() + + # because file upload needs to be re-saved + time.sleep(1) + + fu1 = models.FileUpload.pull(id1) + + assert fu1.status == "failed", "received status: {}".format(fu1.status) + assert job1.outcome_status == "fail" + + assert any('Articles before OA start date: Imaginaires autochtones contemporains. 
Introduction' in entry['message'] for entry in + job1.audit), "No message found with 'Articles before OA start date'" + + # check that article not created + assert models.Article.count_by_issns(["1234-5678", "9876-5432"]) == 0 + + def test_63_article_after_oa_start(self): + journal = article_upload_tester.create_simple_journal("testowner", pissn="1234-5678", eissn="9876-5432") + journal.bibjson().oa_start = 2013 + helpers.save_all_block_last([ journal, + article_upload_tester.create_simple_publisher("testowner") + ]) + + # make both handles, as we want as little gap as possible between requests in a moment + handle1 = DoajXmlArticleFixtureFactory.upload_2_issns_correct() + + f1 = FileMockFactory(stream=handle1) + + job1 = ingestarticles.IngestArticlesBackgroundTask.prepare("testowner", schema="doaj", upload_file=f1) + id1 = job1.params.get("ingest_articles__file_upload_id") + self.cleanup_ids.append(id1) + + # because file upload gets created and saved by prepare + time.sleep(1) + + task1 = ingestarticles.IngestArticlesBackgroundTask(job1) + + task1.run() + + # because file upload needs to be re-saved + time.sleep(1) + + fu1 = models.FileUpload.pull(id1) + + assert fu1.status == "processed", "received status: {}".format(fu1.status) + + assert not any('Articles before OA start date: Imaginaires autochtones contemporains. Introduction' in entry['message'] for entry in + job1.audit), "No message found with 'Articles before OA start date'" + + # check that article not created + assert models.Article.count_by_issns(["1234-5678", "9876-5432"]) == 1 diff --git a/doajtest/unit/test_toc.py b/doajtest/unit/test_toc.py index dafe64ac31..db597cbb63 100644 --- a/doajtest/unit/test_toc.py +++ b/doajtest/unit/test_toc.py @@ -127,8 +127,8 @@ def test_02_toc_requirements(self): # To build ToCs we need a volume, an issue, a year and a month. 
assert a.data['bibjson']['journal']['volume'] == '1' assert a.data['bibjson']['journal']['number'] == '99' - assert a.data['index']['date'] == "1991-01-01T00:00:00Z" - assert a.data['index']['date_toc_fv_month'] == a.data['index']['date'] == "1991-01-01T00:00:00Z" + assert a.data['index']['date'] == "2015-01-01T00:00:00Z" + assert a.data['index']['date_toc_fv_month'] == a.data['index']['date'] == "2015-01-01T00:00:00Z" def test_03_toc_uses_both_issns_when_available(self): _test_toc_uses_both_issns_when_available(self.app_test, 'doaj.toc') diff --git a/doajtest/xml_upload_test_package/DOAJ/successful.xml b/doajtest/xml_upload_test_package/DOAJ/successful.xml index 17e65b9500..4fa8c2eb75 100644 --- a/doajtest/xml_upload_test_package/DOAJ/successful.xml +++ b/doajtest/xml_upload_test_package/DOAJ/successful.xml @@ -7,8 +7,8 @@ Codicille éditeur et CRILCQ Successful - 1111-1111 - 2222-2222 + 2222-2222 + 1111-1111 2013 diff --git a/portality/bll/exceptions.py b/portality/bll/exceptions.py index f6a920a89f..1b1582250a 100644 --- a/portality/bll/exceptions.py +++ b/portality/bll/exceptions.py @@ -73,6 +73,12 @@ def __str__(self): super(ArticleNotAcceptable, self).__str__() return self.message +class ArticleBeforeOAStartDate(ArticleNotAcceptable): + """ + Exception to raise when the article is uploaded before OA start date of the Journal + """ + pass + class ArticleMergeConflict(Exception): """ Exception to raise when it's not clear which article to merge an update with diff --git a/portality/bll/services/article.py b/portality/bll/services/article.py index 0febeb63a3..543dc81528 100644 --- a/portality/bll/services/article.py +++ b/portality/bll/services/article.py @@ -55,6 +55,7 @@ def batch_create_articles(self, articles, account, duplicate_check=True, merge_d all_shared = set() all_unowned = set() all_unmatched = set() + all_before_oa_start_date = set() # Hold on to the exception so we can raise it later e_not_acceptable = None @@ -70,6 +71,9 @@ def 
batch_create_articles(self, articles, account, duplicate_check=True, merge_d dry_run=True) except (exceptions.ArticleMergeConflict, exceptions.ConfigurationException): raise exceptions.IngestException(message=Messages.EXCEPTION_ARTICLE_BATCH_CONFLICT) + except exceptions.ArticleBeforeOAStartDate as e: + result = {'fail': 1} + e_not_acceptable = e except exceptions.ArticleNotAcceptable as e: # The ArticleNotAcceptable exception is a superset of reasons we can't match a journal to this article e_not_acceptable = e @@ -84,7 +88,7 @@ def batch_create_articles(self, articles, account, duplicate_check=True, merge_d all_unmatched.update(result.get("unmatched", set())) report = {"success": success, "fail": fail, "update": update, "new": new, "shared": all_shared, - "unowned": all_unowned, "unmatched": all_unmatched} + "unowned": all_unowned, "unmatched": all_unmatched, "before_oa_start_date":all_before_oa_start_date} # if there were no failures in the batch, then we can do the save if fail == 0: @@ -235,8 +239,15 @@ def create_article(self, article, account, duplicate_check=True, merge_duplicate except (exceptions.DuplicateArticleException, exceptions.ArticleMergeConflict, exceptions.ConfigurationException) as e: raise e + # Check if article is uploaded before OA start date of Journal and reject the article + journal = article.get_journal() + published_year = int(article.bibjson().year) + oa_start_date = journal.has_oa_start_date() + if oa_start_date and published_year < oa_start_date: + raise exceptions.ArticleBeforeOAStartDate(message=Messages.EXCEPTION_ARTICLE_BEFORE_OA_START_DATE) + if add_journal_info: - article.add_journal_metadata() + article.add_journal_metadata(j=journal) # finally, save the new article if not dry_run: diff --git a/portality/scripts/articles_before_oa_start_report.py b/portality/scripts/articles_before_oa_start_report.py new file mode 100644 index 0000000000..ff30478cef --- /dev/null +++ b/portality/scripts/articles_before_oa_start_report.py @@ 
-0,0 +1,279 @@ +""" +Generate a CSV report of all articles in the index that appear to be published earlier than +their journal's Open Access (OA) start year. + +How it works +- For each Article, find its associated Journal using the Article model's helper. +- Read the Journal's OA start year via Journal.has_oa_start_date(). +- Compare the Article's publication year (bibjson.year) to the OA start year. +- If article_year < oa_start_year, output a row in the CSV. + +Usage examples + DOAJENV= python portality/scripts/articles_before_oa_start_report.py -o before_oa_start.csv \ + --page-size 1000 --keepalive 10m --retries 10 --retry-wait 5 + +Notes +- Articles without a resolvable Journal or without a numeric year are skipped. +- Journals without an OA start year are skipped. +- The script only reads from the index and does not modify any data. +""" + +import csv +import sys +import time +from typing import Iterable, Optional, List, Dict, Tuple, Iterator + +from portality.lib import dates +from portality.models import Article, Journal +from portality.models.article import ArticleQuery +from portality.dao import ScrollTimeoutException + + +HEADERS = [ + "Article ID", + "Article Title", + "Article Year", + "eISSN", + "pISSN", + "Journal ID", + "Journal Title", + "Journal OA Start Year", +] + + +def _safe_int(val: Optional[str]) -> Optional[int]: + try: + if val is None: + return None + return int(str(val)) + except Exception: + return None + + +def article_generator(issns: Optional[List[str]] = None, page_size: int = 1000, keepalive: str = "10m") -> Iterable[Article]: + """Yield Article domain objects, optionally filtered by ISSNs. 
Excludes withdrawn articles.""" + if issns: + query = ArticleQuery(issns=issns, in_doaj=True).query() + else: + query = ArticleQuery(in_doaj=True).query() + for a in Article.iterate(q=query, page_size=page_size, keepalive=keepalive, wrap=True): + yield a + + +def iter_articles_raw_for_issns(issns: List[str], page_size: int = 1000, keepalive: str = "10m") -> Iterator[dict]: + """Yield raw article _source dicts for the given ISSNs, excluding withdrawn, with minimal _source.""" + must = [ + {"term": {"admin.in_doaj": True}}, + {"exists": {"field": "bibjson.year"}}, + {"terms": {"index.issn.exact": issns}}, + ] + q = { + "query": {"bool": {"must": must}}, + "_source": [ + "id", + "bibjson.year", + "bibjson.title", + "bibjson.identifier", + "index.issn.exact", + ], + "track_total_hits": True, + } + for hit in Article.iterate(q=q, page_size=page_size, keepalive=keepalive, wrap=False): + yield hit + + +def retrying_article_src_stream(issns: List[str], page_size: int = 1000, keepalive: str = "10m", + retries: int = 10, retry_wait: int = 5) -> Iterator[dict]: + """Yield raw article _source dicts, restarting the ES scroll on ScrollTimeoutException up to `retries` times.""" + attempt = 0 + while True: + try: + for src in iter_articles_raw_for_issns(issns=issns, page_size=page_size, keepalive=keepalive): + yield src + return + except ScrollTimeoutException as e: + attempt += 1 + if attempt > retries: + print(f"Scroll timed out repeatedly; giving up after {retries} retries. Last error: {e}", file=sys.stderr) + raise + print(f"Scroll context lost (attempt {attempt}/{retries}). 
Waiting {retry_wait}s then retrying a fresh scan...", file=sys.stderr) + time.sleep(retry_wait) + # loop and restart a fresh iterator + + +def build_issn_to_journal_map(page_size: int = 2000, keepalive: str = "10m") -> Dict[str, Tuple[str, str, int]]: + """Build a map of ISSN → (journal_id, journal_title, oa_start_year) for journals in DOAJ with an OA start year.""" + must = [ + {"term": {"admin.in_doaj": True}}, + {"exists": {"field": "bibjson.oa_start"}}, + ] + q = { + "query": {"bool": {"must": must}}, + "_source": [ + "id", + "bibjson.title", + "bibjson.pissn", + "bibjson.eissn", + "bibjson.oa_start", + ], + "track_total_hits": True, + } + issn_map: Dict[str, Tuple[str, str, int]] = {} + for jsrc in Journal.iterate(q=q, page_size=page_size, keepalive=keepalive, wrap=False): + bj = jsrc.get("bibjson", {}) + oa = _safe_int(bj.get("oa_start")) + if oa is None: + continue + jid = jsrc.get("id") + jtitle = bj.get("title", "") + for issn in [bj.get("pissn"), bj.get("eissn")]: + if issn: + issn_map[str(issn)] = (jid, jtitle, oa) + return issn_map + + +def _chunked(iterable: List[str], size: int) -> Iterator[List[str]]: + for i in range(0, len(iterable), size): + yield iterable[i:i + size] + + +def make_row_from_hit(src: dict, issn_map: Dict[str, Tuple[str, str, int]]): + # article fields + bj = src.get("bibjson", {}) + title = bj.get("title") or "" + year = _safe_int(bj.get("year")) + if year is None: + return None + + # ISSNs on the article (indexed convenience field) + issns = [] + index_part = src.get("index", {}) + issn_part = index_part.get("issn", {}) + if isinstance(issn_part, dict): + issns = issn_part.get("exact", []) or [] + elif isinstance(issn_part, list): + issns = issn_part + + # also look at identifiers in bibjson if needed + pissn = None + eissn = None + idents = bj.get("identifier") or [] + if isinstance(idents, list): + for ident in idents: + if ident.get("type") == "pissn" and not pissn: + pissn = ident.get("id") + if ident.get("type") == "eissn" 
and not eissn: + eissn = ident.get("id") + # prefer indexed field for matching + candidate_issns = issns or [i for i in [pissn, eissn] if i] + match = next((i for i in candidate_issns if i in issn_map), None) + if not match: + return None + + jid, jtitle, oa_start = issn_map[match] + if year < oa_start: + # Resolve final pissn/eissn nicely for output + out_p = pissn + out_e = eissn + return { + "Article ID": src.get("id", ""), + "Article Title": title, + "Article Year": str(year), + "eISSN": out_e or "", + "pISSN": out_p or "", + "Journal ID": jid, + "Journal Title": jtitle or "", + "Journal OA Start Year": str(oa_start), + } + return None + + +def run(out_file: str, + issns: Optional[List[str]] = None, + limit: Optional[int] = None, + page_size: int = 1000, + keepalive: str = "10m", + retries: int = 10, + retry_wait: int = 5, + flush_every: int = 1000): + + count_examined = 0 + count_flagged = 0 + + # Build journal ISSN → (journal_id, title, oa_start) + issn_map = build_issn_to_journal_map(page_size=2000, keepalive=keepalive) + if not issn_map: + print("No journals with OA start found; nothing to do.", file=sys.stderr) + + # If user provided ISSNs, restrict to those that exist in the journal map + issn_list = list(issn_map.keys()) if not issns else [i for i in issns if i in issn_map] + if issns and not issn_list: + print("Provided ISSNs do not match any journals with OA start; exiting.", file=sys.stderr) + + with open(out_file, "w", encoding="utf-8", newline="") as f: + writer = csv.writer(f) + writer.writerow(HEADERS) + + written_since_flush = 0 + + CHUNK = 5000 # issn terms chunk size to keep query manageable + chunk_index = 0 + total_chunks = (len(issn_list) // CHUNK) + (1 if len(issn_list) % CHUNK else 0) + + for issn_chunk in _chunked(issn_list, CHUNK): + chunk_index += 1 + print(f"Processing ISSN chunk {chunk_index}/{total_chunks} containing {len(issn_chunk)} ISSNs", file=sys.stderr) + + for src in retrying_article_src_stream(issns=issn_chunk, 
page_size=page_size, keepalive=keepalive, + retries=retries, retry_wait=retry_wait): + count_examined += 1 + + row = make_row_from_hit(src, issn_map) + if row is not None: + writer.writerow([row[h] for h in HEADERS]) + count_flagged += 1 + written_since_flush += 1 + + if written_since_flush >= flush_every: + f.flush() + written_since_flush = 0 + + if limit is not None and count_examined >= limit: + break + + # Simple progress to stderr every 10k records + if count_examined % 10000 == 0: + print(f"Scanned {count_examined} articles; flagged {count_flagged} so far...", file=sys.stderr) + + if limit is not None and count_examined >= limit: + break + + # final flush to ensure content hits disk + f.flush() + + print(f"Done. Examined {count_examined} articles; flagged {count_flagged}. Output: {out_file}") + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Report articles earlier than their journal's OA start year") + parser.add_argument("-o", "--out", help="Output CSV file path", default=f"articles_before_oa_start_{dates.today()}.csv") + parser.add_argument("-n", "--limit", type=int, help="Limit number of articles to scan (for testing)") + parser.add_argument("-s", "--issn", action="append", help="Restrict scan to these ISSNs (may be supplied multiple times)") + parser.add_argument("-p", "--page-size", type=int, default=1000, help="Scroll page size for ES iterate") + parser.add_argument("-k", "--keepalive", default="10m", help="Elasticsearch scroll keepalive, e.g. 
10m") + parser.add_argument("-r", "--retries", type=int, default=10, help="Retries on scroll timeout") + parser.add_argument("-w", "--retry-wait", type=int, default=5, help="Seconds to wait between retries") + parser.add_argument("--flush-every", type=int, default=1000, help="Flush output to disk every N rows") + + args = parser.parse_args() + + run(out_file=args.out, + issns=args.issn, + limit=args.limit, + page_size=args.page_size, + keepalive=args.keepalive, + retries=args.retries, + retry_wait=args.retry_wait, + flush_every=args.flush_every) diff --git a/portality/tasks/helpers/articles_upload_helper.py b/portality/tasks/helpers/articles_upload_helper.py index 114e4d7a9a..4f1ac4e72a 100644 --- a/portality/tasks/helpers/articles_upload_helper.py +++ b/portality/tasks/helpers/articles_upload_helper.py @@ -84,6 +84,7 @@ def upload_process(articles_upload: BaseArticlesUpload, shared = result.get("shared", []) unowned = result.get("unowned", []) unmatched = result.get("unmatched", []) + before_oa_start_date = result.get("before_oa_start_date", []) if success == 0 and fail > 0 and not ingest_exception: articles_upload.failed("All articles in file failed to import") @@ -99,6 +100,8 @@ def upload_process(articles_upload: BaseArticlesUpload, job.add_audit_message("Shared ISSNs: " + ", ".join(list(shared))) job.add_audit_message("Unowned ISSNs: " + ", ".join(list(unowned))) job.add_audit_message("Unmatched ISSNs: " + ", ".join(list(unmatched))) + if len(before_oa_start_date) > 0: + job.add_audit_message("Articles before OA start date: " + ", ".join(list(before_oa_start_date))) if new: ids = [a.id for a in articles] diff --git a/portality/templates-v2/public/publisher/xml_help.html b/portality/templates-v2/public/publisher/xml_help.html index 1f8cc75153..68b156a168 100644 --- a/portality/templates-v2/public/publisher/xml_help.html +++ b/portality/templates-v2/public/publisher/xml_help.html @@ -228,6 +228,17 @@

Explanation of XML errors

Ensure that all your articles have the correct DOIs and full-text links. If it still doesn’t work please submit a bug report or contact us with the details; we may need to clean up your existing articles manually. + + + One or more articles cannot be uploaded, as they have a publication date before the 'OA start date' of the Journal + + + One or more articles in the XML have a publicationDate which is before the Open Access start date of the Journal. + + + Ensure that all your articles have a publication date on or after the Open Access start date of the Journal. + + + {% endblock %} diff --git a/portality/ui/messages.py b/portality/ui/messages.py index 038848d9ca..53e2b60c20 100644 --- a/portality/ui/messages.py +++ b/portality/ui/messages.py @@ -64,6 +64,7 @@ class Messages(object): EXCEPTION_IDENTICAL_PISSN_AND_EISSN = "The Print and Online ISSNs supplied are identical. If you supply two ISSNs, they must be different." EXCEPTION_NO_ISSNS = "Neither the Print ISSN nor Online ISSN have been supplied. DOAJ requires at least one ISSN." EXCEPTION_INVALID_BIBJSON = "Invalid article bibjson: " # + Dataobj exception message + EXCEPTION_ARTICLE_BEFORE_OA_START_DATE = "One or more articles cannot be uploaded, as they have a publication date before the 'OA start date' of the Journal" EXCEPTION_IDENTIFIER_CHANGE_CLASH = "DOI or Fulltext URL has been changed to match another article that already exists in DOAJ" EXCEPTION_IDENTIFIER_CHANGE = "Either the DOI or Fulltext URL has been changed. This operation is not permitted; please contact an administrator for help."