Until then, the English string will be used as a fallback.
+""" + +from .check_strings import ( + find_python_files, + extract_string_keys_from_file, + load_properties_files, + load_properties_file, + find_missing_strings, + check_build_mode, + check_dev_mode +) + +__all__ = [ + 'find_python_files', + 'extract_string_keys_from_file', + 'load_properties_files', + 'load_properties_file', + 'find_missing_strings', + 'check_build_mode', + 'check_dev_mode' +] \ No newline at end of file diff --git a/bin/i18n/check_strings.py b/bin/i18n/check_strings.py new file mode 100644 index 00000000..f52ee25e --- /dev/null +++ b/bin/i18n/check_strings.py @@ -0,0 +1,378 @@ +#!/usr/bin/env python3 +""" +Check for missing localization strings in the tabcmd codebase. + +This script scans all Python source files under ./tabcmd for strings using the _() +localization pattern and checks if they're present in the locale files. + +Usage: + python bin/i18n/check_strings.py # Dev mode: check against en/*.properties + python bin/i18n/check_strings.py --mode build # Build mode: check against filtered.properties for all locales + +Returns: + 0 if no missing strings found + 1 if missing strings found +""" + +import argparse +import os +import re +import sys +from pathlib import Path +from typing import Set, List, Tuple + +# Default locales (matches dodo.py) +DEFAULT_LOCALES = ["en", "de", "es", "fr", "ga", "it", "pt", "sv", "ja", "ko", "zh"] + + +def find_python_files(root_dir: str) -> List[str]: + """Find all Python files under the given directory.""" + python_files = [] + for root, dirs, files in os.walk(root_dir): + # Skip __pycache__ and other common directories to ignore + dirs[:] = [d for d in dirs if not d.startswith(".") and d != "__pycache__"] + + for file in files: + if file.endswith(".py"): + python_files.append(os.path.join(root, file)) + + return python_files + + +def extract_string_keys_from_file(file_path: str) -> Set[str]: + """Extract all string keys used in _() calls from a Python file.""" + string_keys = set() + + try: + with 
open(file_path, "r", encoding="utf-8") as f: + content = f.read() + except UnicodeDecodeError: + # Try with different encoding if UTF-8 fails + try: + with open(file_path, "r", encoding="latin-1") as f: + content = f.read() + except Exception as e: + print(f"Warning: Could not read file {file_path}: {e}", file=sys.stderr) + return string_keys + + # Pattern to match _("string_key") or _('string_key') + # This handles both single and double quotes + pattern = r'_\s*\(\s*["\']([^"\']+)["\']\s*\)' + + matches = re.findall(pattern, content) + string_keys.update(matches) + + return string_keys + + +def load_properties_file(file_path: str) -> Set[str]: + """Load string keys from a single properties file.""" + string_keys = set() + + if not os.path.exists(file_path): + return string_keys + + try: + with open(file_path, "r", encoding="utf-8") as f: + for line_num, line in enumerate(f, 1): + line = line.strip() + + # Skip comments and empty lines + if not line or line.startswith("#"): + continue + + # Parse key=value format + if "=" in line: + key = line.split("=", 1)[0].strip() + string_keys.add(key) + + except UnicodeDecodeError: + # Try with different encoding if UTF-8 fails + try: + with open(file_path, "r", encoding="latin-1") as f: + for line_num, line in enumerate(f, 1): + line = line.strip() + + # Skip comments and empty lines + if not line or line.startswith("#"): + continue + + # Parse key=value format + if "=" in line: + key = line.split("=", 1)[0].strip() + string_keys.add(key) + except Exception as e: + print(f"Warning: Could not read properties file {file_path}: {e}", file=sys.stderr) + except Exception as e: + print(f"Warning: Could not read properties file {file_path}: {e}", file=sys.stderr) + + return string_keys + + +def load_properties_files(locale_dir: str) -> Set[str]: + """Load all string keys from properties files in the locale directory.""" + string_keys = set() + + if not os.path.exists(locale_dir): + print(f"Warning: Locale directory {locale_dir} 
does not exist", file=sys.stderr) + return string_keys + + # Find all .properties files + for root, dirs, files in os.walk(locale_dir): + for file in files: + if file.endswith(".properties"): + file_path = os.path.join(root, file) + try: + with open(file_path, "r", encoding="utf-8") as f: + for line_num, line in enumerate(f, 1): + line = line.strip() + + # Skip comments and empty lines + if not line or line.startswith("#"): + continue + + # Parse key=value format + if "=" in line: + key = line.split("=", 1)[0].strip() + string_keys.add(key) + + except UnicodeDecodeError: + # Try with different encoding if UTF-8 fails + try: + with open(file_path, "r", encoding="latin-1") as f: + for line_num, line in enumerate(f, 1): + line = line.strip() + + # Skip comments and empty lines + if not line or line.startswith("#"): + continue + + # Parse key=value format + if "=" in line: + key = line.split("=", 1)[0].strip() + string_keys.add(key) + except Exception as e: + print(f"Warning: Could not read properties file {file_path}: {e}", file=sys.stderr) + except Exception as e: + print(f"Warning: Could not read properties file {file_path}: {e}", file=sys.stderr) + + return string_keys + + +def find_missing_strings(python_files: List[str], defined_keys: Set[str]) -> List[Tuple[str, str]]: + """Find missing string keys and their source files.""" + missing_strings = [] + + for file_path in python_files: + file_keys = extract_string_keys_from_file(file_path) + + for key in file_keys: + if key not in defined_keys: + missing_strings.append((key, file_path)) + + return missing_strings + + +def format_limited_list(items: List[str], prefix: str = " Missing: ", limit: int = 5) -> List[str]: + """Format a list of items with a limit, showing [+x more] if truncated.""" + sorted_items = sorted(items) + if len(sorted_items) <= 10: + # Show all items if 10 or fewer + return [f"{prefix}{item}" for item in sorted_items] + else: + # Show first 5 items + summary for more than 10 + result = 
[f"{prefix}{item}" for item in sorted_items[:limit]] + remaining = len(sorted_items) - limit + result.append(f" [+{remaining} more]") + return result + + +def check_build_mode(project_root: Path, locales: List[str]) -> int: + """Check all locales against filtered.properties files (build pipeline mode).""" + tabcmd_dir = project_root / "tabcmd" + + # Setup output file + output_file = project_root / "localization_check_results.txt" + + def print_and_write(message, file_handle=None): + """Print to console and write to file""" + print(message) + if file_handle: + file_handle.write(message + "\n") + + with open(output_file, "w", encoding="utf-8") as f: + print_and_write(f"Build mode: Scanning Python files in: {tabcmd_dir}", f) + print_and_write(f"Checking locales: {', '.join(locales)}", f) + print_and_write("", f) + + # Find all Python files and extract string keys + python_files = find_python_files(str(tabcmd_dir)) + print_and_write(f"Found {len(python_files)} Python files to scan", f) + + code_strings = set() + for file_path in python_files: + code_strings.update(extract_string_keys_from_file(file_path)) + + print_and_write(f"Found {len(code_strings)} unique string keys in code", f) + + # Check each locale, starting with English as baseline + english_success = True # Only track English success for exit code + english_missing_keys = set() + english_output = "" # Store English output to repeat at end + locales_with_same_missing = [] + + for locale in locales: + filtered_file = project_root / "tabcmd" / "locales" / locale / "LC_MESSAGES" / "filtered.properties" + + if not filtered_file.exists(): + print_and_write(f"WARNING: No filtered.properties for locale '{locale}' at {filtered_file}", f) + continue + + defined_keys = load_properties_file(str(filtered_file)) + missing_keys = code_strings - defined_keys + + if missing_keys: + if locale == "en": + # English has missing keys - this affects exit code + english_success = False + english_missing_keys = missing_keys + 
english_output = f"\nERROR: Found {len(missing_keys)} missing string keys for locale '{locale}':\n" + english_output += "=" * 60 + "\n" + for line in format_limited_list(list(missing_keys)): + english_output += line + "\n" + print_and_write(english_output.rstrip(), f) # Print now for baseline + else: + # For other languages, only show if different from English + if missing_keys == english_missing_keys: + locales_with_same_missing.append(locale) + else: + print_and_write(f"\nERROR: Found {len(missing_keys)} missing string keys for locale '{locale}' (different from English):", f) + print_and_write("=" * 60, f) + + # Show keys unique to this locale + unique_to_locale = missing_keys - english_missing_keys + if unique_to_locale: + print_and_write(f" Additional missing keys in {locale}:", f) + for line in format_limited_list(list(unique_to_locale), " Missing: "): + print_and_write(line, f) + + # Show keys missing in English but present in this locale + present_in_locale = english_missing_keys - missing_keys + if present_in_locale: + print_and_write(f" Keys present in {locale} but missing in English:", f) + for line in format_limited_list(list(present_in_locale), " Present: "): + print_and_write(line, f) + + # Show common missing keys if both have missing keys + common_missing = missing_keys & english_missing_keys + if common_missing and (unique_to_locale or present_in_locale): + print_and_write(f" Keys missing in both English and {locale}: {len(common_missing)}", f) + else: + if locale == "en": + english_output = f"[OK] Locale '{locale}': All {len(code_strings)} string keys found" + print_and_write(english_output, f) # Print now for baseline + else: + print_and_write(f"[OK] Locale '{locale}': All {len(code_strings)} string keys found", f) + + # Show summary for locales with same missing keys as English + if locales_with_same_missing: + print_and_write(f"\nNOTE: The following locales have the same missing keys as English:", f) + print_and_write(f" {', 
'.join(locales_with_same_missing)}", f) + print_and_write(f" Missing keys: {len(english_missing_keys)}", f) + + # Print English results again at the end for visibility + if english_output and "en" in locales: + print_and_write(f"\n--- English Results (repeated for visibility) ---", f) + print_and_write(english_output.rstrip(), f) + + # Summary message about file output + print_and_write(f"\nResults saved to: {output_file}", f) + + # Only fail if English has missing strings + if english_success: + print_and_write("\nSUCCESS: All required English strings are present", f) + else: + print_and_write("\nFAILED: English is missing required strings", f) + + return 0 if english_success else 1 + + +def check_dev_mode(project_root: Path) -> int: + """Check against English properties files (development mode - original behavior).""" + tabcmd_dir = project_root / "tabcmd" + locale_dir = project_root / "tabcmd" / "locales" / "en" + + print(f"Dev mode: Scanning Python files in: {tabcmd_dir}") + print(f"Checking locale files in: {locale_dir}") + print() + + # Find all Python files + python_files = find_python_files(str(tabcmd_dir)) + print(f"Found {len(python_files)} Python files to scan") + + # Load all defined string keys from properties files + defined_keys = load_properties_files(str(locale_dir)) + print(f"Found {len(defined_keys)} defined string keys in locale files") + + # Find missing strings + missing_strings = find_missing_strings(python_files, defined_keys) + + if missing_strings: + print(f"\nERROR: Found {len(missing_strings)} missing string keys:") + print("=" * 80) + + # Group by file for better output + missing_by_file = {} + for key, file_path in missing_strings: + if file_path not in missing_by_file: + missing_by_file[file_path] = [] + missing_by_file[file_path].append(key) + + for file_path in sorted(missing_by_file.keys()): + # Show relative path from project root + rel_path = os.path.relpath(file_path, project_root) + print(f"\nFile: {rel_path}") + print("-" * 
40) + for line in format_limited_list(missing_by_file[file_path]): + print(line) + + print("\n" + "=" * 80) + print("Please add the missing string keys to the appropriate .properties files.") + return 1 + else: + print("\nSUCCESS: All string keys are properly defined in locale files.") + return 0 + + +def main(): + """Main function to check for missing localization strings.""" + parser = argparse.ArgumentParser( + description="Check for missing localization strings in tabcmd codebase" + ) + parser.add_argument( + "--mode", + choices=["dev", "build"], + default="dev", + help="dev: check against en/*.properties (default), build: check against filtered.properties for all locales" + ) + parser.add_argument( + "--locales", + nargs="*", + default=DEFAULT_LOCALES, + help="Locales to check in build mode (default: all supported locales)" + ) + + args = parser.parse_args() + + # Get the project root directory (assuming script is in bin/i18n/) + script_dir = Path(__file__).parent + project_root = script_dir.parent.parent + + if args.mode == "dev": + return check_dev_mode(project_root) + else: # build mode + return check_build_mode(project_root, args.locales) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/contributing.md b/contributing.md index af44e7f6..635ec8e7 100644 --- a/contributing.md +++ b/contributing.md @@ -91,6 +91,7 @@ _(note that running mypy and black with no errors is required before code will b > python -m tabcmd.py [command_name] [--flags] - run tests +> pip install .[test] > pytest - run tests against a live server @@ -107,10 +108,32 @@ _(note that running mypy and black with no errors is required before code will b > bin/coverage.sh + +### Localization + +Strings should be added/edited in /tabcmd/locales/en/{name}.properties by id and referred to in code as +> string = _("string.id") + +- regenerate updated strings for packaging as exe +> python -m doit properties po mo + + +### Versioning + +Versioning is done with setuptools_scm and based 
(Creating a GitHub release also creates a tag on the selected branch.)
The entire conversion action is done by a .doit script: -> doit properties po mo -You can also check that there are no malformed/forgotten message keys in the code with -> doit strings - -### Versioning - -Versioning is done with setuptools_scm and based on git tags. The version number will be x.y.dev0.dirty except for commits with a new version tag. - This is pulled from the git state, and to get a clean version like "v2.1.0", you must be on a commit with the tag "v2.1.0" (Creating a Github release also creates a tag on the selected branch.) -The version reflected in the executable (tabcmd -v) is stored in a metadata file created by a .doit script: -> doit version - ## Release process 1. Create a new Github project release manually: https://docs.github.com/en/repositories/releasing-projects-on-github/about-releases @@ -160,7 +165,7 @@ The version reflected in the executable (tabcmd -v) is stored in a metadata file - do not unzip the linux app, github doesn't like it. upload as tabcmd.zip (Pay attention to what the file type is, github also sends it as a zip if you download with curl etc. TODO: automate workflow with a github action) -1. To trigger publishing to pypi run the manual workflow on main with 'pypi'. (TODO: automate trigger) +1. To trigger publishing to pypi run the manual workflow on main with 'pypi'. 1. When the packages are available on pypi, you can run the 'Pypi smoke test action'. This action will also be run every 24 hours to validate doing pip install. 
(TODO: automate the after-release trigger) diff --git a/dodo.py b/dodo.py index 6ef0c364..3c5d415a 100644 --- a/dodo.py +++ b/dodo.py @@ -1,6 +1,7 @@ import glob import os import subprocess +import sys import setuptools_scm LOCALES = ["en", "de", "es", "fr", "ga", "it", "pt", "sv", "ja", "ko", "zh"] @@ -8,10 +9,9 @@ """ https://pydoit.org/ Usage: -pip install -e .[prep_work] +pip install -e .[localize] doit list # see available tasks -FYI: to read mo and po files use https://poedit.net/download """ @@ -28,33 +28,48 @@ def task_properties(): def process_code(): print("\n***** Collect all string keys used in code") - CODE_PATH = "tabcmd/**/*.py" + # Import string extraction logic from check_strings module + import sys + from pathlib import Path + + # Add bin directory to Python path for imports + bin_path = str(Path(__file__).parent / "bin") + if bin_path not in sys.path: + sys.path.insert(0, bin_path) + + from i18n.check_strings import find_python_files, extract_string_keys_from_file + + tabcmd_dir = "tabcmd" STRINGS_FILE = "tabcmd/locales/codestrings.properties" - STRING_FLAG = '_("' - STRING_END = '")' - lines = set([]) + # Use enhanced string extraction logic + python_files = find_python_files(tabcmd_dir) + all_string_keys = set() + + # Log files being processed to localize.log + with open("localize.log", "a", encoding="utf-8") as log_file: + log_file.write("# Code files processed for string extraction:\n") + for codefile in python_files: + log_file.write("\t{}\n".format(codefile)) + log_file.write("\n") + + for codefile in python_files: + file_keys = extract_string_keys_from_file(codefile) + all_string_keys.update(file_keys) + + # Write to codestrings.properties (same format as before) with open(STRINGS_FILE, "w+", encoding="utf-8") as stringfile: - for codefile in glob.glob(CODE_PATH, recursive=True): - print("\t" + codefile) - with open(codefile, encoding="utf-8") as infile: - # find lines that contain a loc string in the form _("string goes here") - for line 
in infile: - i = line.find(STRING_FLAG) - # include only the string itself and the quote symbols around it - if i >= 0: - # print(line) - j = line.find(STRING_END) - lines.add(line[i + 3 : j] + "\n") - - sorted_lines = sorted(lines) - stringfile.writelines(sorted_lines) - - print("{} strings collected from code and saved to {}".format(len(lines), STRINGS_FILE)) + sorted_keys = sorted(all_string_keys) + for key in sorted_keys: + stringfile.write(key + "\n") + + print("{} strings collected from code and saved to {}".format(len(all_string_keys), STRINGS_FILE)) def merge(): print("\n***** Combine our multiple input properties files into one .properties file per locale") - for current_locale in LOCALES: + # Process English last for cleaner output + locales_ordered = [loc for loc in LOCALES if loc != "en"] + ["en"] + for current_locale in locales_ordered: LOCALE_PATH = os.path.join("tabcmd", "locales", current_locale) INPUT_FILES = os.path.join(LOCALE_PATH, "*.properties") @@ -85,11 +100,19 @@ def merge(): def filter(): print("\n***** Remove strings in properties that are never used in code") REF_FILE = os.path.join("tabcmd", "locales", "codestrings.properties") - for current_locale in LOCALES: + UNUSED_ENGLISH_FILE = "unused_english_strings.txt" + + # Track unused English strings for separate output file + unused_english_strings = [] + + # Process English last for cleaner output + locales_ordered = [loc for loc in LOCALES if loc != "en"] + ["en"] + for current_locale in locales_ordered: LOCALE_PATH = os.path.join("tabcmd", "locales", current_locale) IN_FILE = os.path.join(LOCALE_PATH, "LC_MESSAGES", "combined.properties") OUT_FILE = os.path.join(LOCALE_PATH, "LC_MESSAGES", "filtered.properties") + excluded_count = 0 with open(REF_FILE, "r+", encoding="utf-8") as ref: required = ref.read() @@ -99,35 +122,68 @@ def filter(): if key in required: outfile.writelines(line) else: - print("\tExcluding {}".format(key)) + excluded_count += 1 + # Track unused English strings for 
output file + if current_locale == "en": + unused_english_strings.append(line.strip()) + + # Show summary for all languages + if excluded_count > 0: + print("Filtered strings for {} (excluded {} unused strings)".format(current_locale, excluded_count)) + else: + print("Filtered strings for {} (no unused strings)".format(current_locale)) + + # Write unused English strings to separate file (silently) + if unused_english_strings: + with open(UNUSED_ENGLISH_FILE, "w+", encoding="utf-8") as unused_file: + unused_file.write("# Unused English strings (present in properties but not referenced in code)\n") + unused_file.write("# Generated by doit properties filter step\n") + unused_file.write(f"# Found {len(unused_english_strings)} unused strings\n\n") + + for unused_string in sorted(unused_english_strings): + unused_file.write(unused_string + "\n") - print("Filtered strings for {}".format(current_locale)) + # Store count for final summary (will be printed at end of validation step) + with open("unused_count.tmp", "w") as count_file: + count_file.write(str(len(unused_english_strings))) """Remove """ """Search loc files for each string used in code - print an error if not found. - Input: codestrings.properties file created by task_collect_strings - Output: console listing missing keys + Uses enhanced check_strings.py script for validation. 
""" def enforce_strings_present(): + print("\n***** Verify that all string keys are present using check_strings validator") + + # English must be processed FIRST for validation baseline, others can be in any order + locales_ordered = ["en"] + [loc for loc in LOCALES if loc != "en"] + result = subprocess.run( + ["python", "bin/i18n/check_strings.py", "--mode", "build", "--locales"] + locales_ordered, + capture_output=True, + text=True, + ) - print("\n***** Verify that all string keys used in code are present in string properties") - STRINGS_FILE = "tabcmd/locales/codestrings.properties" - uniquify_file(STRINGS_FILE) - with open(STRINGS_FILE, "r+", encoding="utf-8") as stringfile: - codestrings = stringfile.readlines() - for locale in LOCALES: - LOC_FILE = os.path.join("tabcmd", "locales", locale, "LC_MESSAGES", "filtered.properties") - print("checking language {}".format(locale)) - with open(LOC_FILE, "r+", encoding="utf-8") as propsfile: - translated_strings = propsfile.read() - for message_key in codestrings: - message_key = message_key.strip("\n") - message_key = message_key.strip('"') - if message_key not in translated_strings: - print("ERROR: product string not in strings files [{}]".format(message_key)) - print("Done") + # Print the output from the validation script + if result.stdout: + print(result.stdout) + if result.stderr: + print(result.stderr, file=sys.stderr) + + if result.returncode != 0: + print("VALIDATION FAILED: Missing localization strings found") + exit(1) + else: + print("All string validations passed") + + # Print unused English strings summary at the end + if os.path.exists("unused_count.tmp"): + with open("unused_count.tmp", "r") as count_file: + unused_count = int(count_file.read().strip()) + print(f"\n{unused_count} unused English strings saved to unused_english_strings.txt") + os.remove("unused_count.tmp") # Clean up temp file + else: + print("\nNo unused English strings found") return { "actions": [process_code, merge, filter, 
enforce_strings_present], @@ -144,7 +200,7 @@ def task_po(): """ There are two versions of prop2po: - 1.0, available through pip install prop2po, from https://github.com/mivek/prop2po - it doesn't have any way to control which encoding it uses so I'm patching it + it doesn't have any way to control which encoding it uses so I'm patching it at bin/i18n/prop2po.py - 3.x, from pip install translate-toolkit: it copies key->comment, value-> msgid, ""->msgstr which is not at all what we want """ @@ -241,7 +297,7 @@ def clean_string_files(): def task_mo(): """ For all languages: Processes the tabcmd.po file to produce a final tabcmd.mo file for each language - Uses msgfmt.py from gettext, which is copied locally into the repo + Uses msgfmt.py from gettext, which is copied locally into the repo at bin/i18n/msgfmt.py """ def generate_mo(): @@ -324,6 +380,7 @@ def write_for_pyinstaller(): # local method, not exposed as a task def uniquify_file(filename): uniques = set([]) + discarded_lines = [] with open(filename, "r", encoding="utf-8") as my_file: my_file.seek(0) @@ -335,7 +392,7 @@ def uniquify_file(filename): if line == "": continue elif "=" not in line and "codestrings" not in filename: - print("\tprop2po will not like this line. 
Discarding [{}]".format(line)) + discarded_lines.append(line) continue else: uniques.add(line + "\n") @@ -345,4 +402,19 @@ def uniquify_file(filename): for line in uniques: my_file.write(line) - print("Saved {} sorted unique lines to {}".format(len(uniques), filename)) + # Write discarded lines to log file + if discarded_lines: + log_filename = "localize.log" + with open(log_filename, "a", encoding="utf-8") as log_file: + log_file.write("# Lines discarded from {} because prop2po will not like them\n".format(filename)) + log_file.write("# These lines don't contain '=' and are not codestrings\n\n") + for line in discarded_lines: + log_file.write(line + "\n") + log_file.write("\n") # Add separator between different files + print( + "Saved {} sorted unique lines to {} ({} discarded lines logged to {})".format( + len(uniques), filename, len(discarded_lines), log_filename + ) + ) + else: + print("Saved {} sorted unique lines to {}".format(len(uniques), filename)) diff --git a/tabcmd/commands/datasources_and_workbooks/delete_command.py b/tabcmd/commands/datasources_and_workbooks/delete_command.py index 9ac1a9e3..3e82e081 100644 --- a/tabcmd/commands/datasources_and_workbooks/delete_command.py +++ b/tabcmd/commands/datasources_and_workbooks/delete_command.py @@ -22,7 +22,7 @@ class DeleteCommand(DatasourcesAndWorkbooks): @staticmethod def define_args(delete_parser): group = delete_parser.add_argument_group(title=DeleteCommand.name) - group.add_argument("name", help=_("content_type.workbook") + "/" + _("content_type.datasource")) + group.add_argument("name", help=_("tabcmd.delete.target.name")) set_ds_xor_wb_options(group) set_project_r_arg(group) set_parent_project_arg(group) @@ -45,19 +45,20 @@ def run_command(args): if container: item_name = (args.parent_project_path or "") + "/" + (args.project_name or "default") + "/" + args.name else: - Errors.exit_with_error(logger, "Containing project could not be found") + + Errors.exit_with_error(logger, 
_("tabcmd.errors.parent.not.found")) logger.info(_("delete.status").format(content_type, item_name or args.name)) error = None if args.workbook or not content_type: - logger.debug("Attempt as workbook") + logger.debug(_("delete.status").format("Workbook", args.workbook)) try: item_to_delete = DeleteCommand.get_workbook_item(logger, server, args.name, container) content_type = "workbook" except TSC.ServerResponseError as error: logger.debug(error) if args.datasource or not content_type: - logger.debug("Attempt as datasource") + logger.debug(_("delete.status").format("Datasource", args.datasource)) try: item_to_delete = DeleteCommand.get_data_source_item(logger, server, args.name, container) content_type = "datasource" diff --git a/tabcmd/commands/datasources_and_workbooks/export_command.py b/tabcmd/commands/datasources_and_workbooks/export_command.py index 50267c19..ac1ed1b3 100644 --- a/tabcmd/commands/datasources_and_workbooks/export_command.py +++ b/tabcmd/commands/datasources_and_workbooks/export_command.py @@ -4,6 +4,7 @@ from tabcmd.commands.auth.session import Session from tabcmd.commands.constants import Errors +from tabcmd.execution.global_options import set_destination_filename_arg from tabcmd.execution.localize import _ from tabcmd.execution.logger_config import log from .datasources_and_workbooks_command import DatasourcesAndWorkbooks diff --git a/tabcmd/commands/datasources_and_workbooks/get_url_command.py b/tabcmd/commands/datasources_and_workbooks/get_url_command.py index 1eb0b3ee..c0ea0b72 100644 --- a/tabcmd/commands/datasources_and_workbooks/get_url_command.py +++ b/tabcmd/commands/datasources_and_workbooks/get_url_command.py @@ -26,7 +26,7 @@ class GetUrl(DatasourcesAndWorkbooks): def define_args(get_url_parser): group = get_url_parser.add_argument_group(title=GetUrl.name) group.add_argument("url", help=_("refreshextracts.options.url")) - set_filename_arg(group) + set_destination_filename_arg(group) # these don't need arguments, although that 
would be a good future addition # tabcmd get "/views/Finance/InvestmentGrowth.png?:size=640,480" -f growth.png # tabcmd get "/views/Finance/InvestmentGrowth.png?:refresh=yes" -f growth.png diff --git a/tabcmd/commands/extracts/create_extracts_command.py b/tabcmd/commands/extracts/create_extracts_command.py index f54baf79..16f3b282 100644 --- a/tabcmd/commands/extracts/create_extracts_command.py +++ b/tabcmd/commands/extracts/create_extracts_command.py @@ -60,7 +60,7 @@ def run_command(args): except Exception as e: if args.continue_if_exists and Errors.is_resource_conflict(e): - logger.info(_("errors.xmlapi.already_exists").format(_("content_type.extract"), args.name)) + logger.info(_("errors.xmlapi.already_exists").format(_("tabcmd.content_type.extract"), args.name)) return Errors.exit_with_error(logger, e) diff --git a/tabcmd/commands/group/create_group_command.py b/tabcmd/commands/group/create_group_command.py index 5df7bf87..36d7c5f8 100644 --- a/tabcmd/commands/group/create_group_command.py +++ b/tabcmd/commands/group/create_group_command.py @@ -33,6 +33,6 @@ def run_command(args): logger.info(_("common.output.succeeded")) except Exception as e: if args.continue_if_exists and Errors.is_resource_conflict(e): - logger.info(_("errors.xmlapi.already_exists").format(_("content_type.group"), args.name)) + logger.info(_("errors.xmlapi.already_exists").format(_("tabcmd.content_type.group"), args.name)) return Errors.exit_with_error(logger, exception=e) diff --git a/tabcmd/commands/site/delete_site_command.py b/tabcmd/commands/site/delete_site_command.py index f1892730..672d0141 100644 --- a/tabcmd/commands/site/delete_site_command.py +++ b/tabcmd/commands/site/delete_site_command.py @@ -18,7 +18,7 @@ class DeleteSiteCommand(Server): @staticmethod def define_args(delete_site_parser): args_group = delete_site_parser.add_argument_group(title=DeleteSiteCommand.name) - args_group.add_argument("site_name_to_delete", metavar="site-name", help=strings[2]) + 
args_group.add_argument("site_name_to_delete", metavar="site-name", help=_("tabcmd.options.delete_site.name")) @staticmethod def run_command(args): @@ -28,18 +28,9 @@ def run_command(args): server = session.create_session(args, logger) target_site: TSC.SiteItem = Server.get_site_by_name(logger, server, args.site_name_to_delete) target_site_id = target_site.id - logger.debug(strings[3].format(target_site_id, server.site_id)) + logger.debug(_("tabcmd.deletesite.status_message").format(target_site_id, server.site_id)) try: server.sites.delete(target_site_id) + logger.info(_("tabcmd.deletesite.success").format(args.site_name_to_delete)) except Exception as e: - Errors.exit_with_error(logger, strings[4], e) - logger.info(strings[0].format(args.site_name_to_delete)) - - -strings = [ - "Successfully deleted site {}", - "Server responded with an error while deleting site", - "name of site to delete", - "Deleting site {0}, logged in to site {1}", - "Error while deleting site", -] + Errors.exit_with_error(logger, _("tabcmd.deletesite.error"), e) diff --git a/tabcmd/commands/site/list_command.py b/tabcmd/commands/site/list_command.py index 64e4fa48..66c5aac0 100644 --- a/tabcmd/commands/site/list_command.py +++ b/tabcmd/commands/site/list_command.py @@ -10,24 +10,16 @@ class ListCommand(Server): Command to return a list of content the user can access """ - # strings to move to string files - local_strings = { - "tabcmd_content_listing": "===== Listing {0} content for user {1}...", - "tabcmd_listing_label_name": "\tNAME: {}", - "tabcmd_listing_label_id": "ID: {}", - "tabcmd_content_none": "No content found.", - } - name: str = "list" - description: str = "List content items of a specified type" + description: str = _("tabcmd.listing.short_description") @staticmethod def define_args(list_parser): args_group = list_parser.add_argument_group(title=ListCommand.name) args_group.add_argument( - "content", choices=["projects", "workbooks", "datasources", "flows"], help="View content" 
+ "content", choices=["projects", "workbooks", "datasources", "flows"], help=_("tabcmd.options.select_type") ) - args_group.add_argument("-d", "--details", action="store_true", help="Show object details") + args_group.add_argument("-d", "--details", action="store_true", help=_("tabcmd.options.include_details")) @staticmethod def run_command(args): @@ -38,7 +30,7 @@ def run_command(args): content_type = args.content try: - logger.info(ListCommand.local_strings["tabcmd_content_listing"].format(content_type, session.username)) + logger.info(_("tabcmd.listing.header").format(content_type, session.username)) if content_type == "projects": items = server.projects.all() @@ -50,7 +42,7 @@ def run_command(args): items = server.flows.all() if not items or len(items) == 0: - logger.info(ListCommand.local_strings["tabcmd_content_none"]) + logger.info(_("tabcmd.listing.none")) for item in items: if args.details: logger.info("\t{}".format(item)) @@ -59,8 +51,8 @@ def run_command(args): for v in item.views: logger.info(v) else: - logger.info(ListCommand.local_strings["tabcmd_listing_label_id"].format(item.id)) - logger.info(ListCommand.local_strings["tabcmd_listing_label_name"].format(item.name)) + logger.info(_("tabcmd.listing.label.id").format(item.id)) + logger.info(_("tabcmd.listing.label.name").format(item.name)) except Exception as e: Errors.exit_with_error(logger, e) diff --git a/tabcmd/commands/user/create_users_command.py b/tabcmd/commands/user/create_users_command.py index 38709afe..f44ee333 100644 --- a/tabcmd/commands/user/create_users_command.py +++ b/tabcmd/commands/user/create_users_command.py @@ -64,7 +64,7 @@ def run_command(args): number_of_users_added += 1 except Exception as e: if Errors.is_resource_conflict(e) and args.continue_if_exists: - logger.info(_("createsite.errors.site_name_already_exists").format(args.new_site_name)) + logger.info(_("tabcmd.errors.user_already_exists").format(user_obj.name)) continue number_of_errors += 1 diff --git 
a/tabcmd/execution/global_options.py b/tabcmd/execution/global_options.py index f4c85361..a0c1ee79 100644 --- a/tabcmd/execution/global_options.py +++ b/tabcmd/execution/global_options.py @@ -41,7 +41,7 @@ def find_choice(choice): def set_parent_project_arg(parser): - parser.add_argument("--parent-project-path", default=None, help="path of parent project") + parser.add_argument("--parent-project-path", default=None, help=_("tabcmd.options.parent_project")) return parser @@ -55,7 +55,7 @@ def set_users_file_arg(parser): "--users", required=True, type=argparse.FileType("r", encoding="utf-8-sig"), - help="CSV file containing a list of users.", + help=_("tabcmd.options.users_file"), ) return parser @@ -65,24 +65,18 @@ def set_users_file_positional(parser): "filename", metavar="filename.csv", type=argparse.FileType("r", encoding="utf-8-sig"), - help="CSV file containing a list of users.", + help=_("tabcmd.options.users_file"), ) return parser def set_no_wait_option(parser): - parser.add_argument( - "--no-wait", - action="store_true", - help="Do not wait for asynchronous jobs to complete.", - ) + parser.add_argument("--no-wait", action="store_true", help=_("common.options.nowait")) return parser def set_silent_option(parser): - parser.add_argument( - "--silent-progress", action="store_true", help="Do not display progress messages for the command." - ) + parser.add_argument("--silent-progress", action="store_true", help=_("common.options.silent-progress")) return parser @@ -92,13 +86,13 @@ def set_completeness_options(parser): "--complete", dest="require_all_valid", action="store_true", - help="Requires that all rows be valid for any change to succeed.", + help=_("tabcmd.options.complete"), ) completeness_group.add_argument( "--no-complete", dest="require_all_valid", action="store_false", - help="Allows a change to succeed when not all rows are valid. 
If not specified --complete is used.", + help=_("tabcmd.options.no_complete"), ) completeness_group.set_defaults(require_all_valid=True) return parser @@ -110,13 +104,9 @@ def set_embedded_datasources_options(parser): embedded_group = parser.add_mutually_exclusive_group() embedded_group.add_argument( # nargs? "--embedded-datasources", - help="A space-separated list of embedded data source names within the target workbook.", - ) - embedded_group.add_argument( - "--include-all", - action="store_true", - help="Include all embedded data sources within target workbook.", + help=_("createextracts.options.embedded-datasources"), ) + embedded_group.add_argument("--include-all", action="store_true", help=_("createextracts.options.include-all")) return parser @@ -126,55 +116,49 @@ def set_encryption_option(parser): "--encrypt", dest="encrypt", action="store_true", # set to true IF user passes in option --encrypt - help="Encrypt the newly created extract. [N/a on Tableau Cloud: extract encryption is controlled by Site Admin]", + help=_("createextracts.options.encrypt"), ) return parser # item arguments: datasource, workbook, project, url ... 
+# Matching classic tabcmd: # for some reason in parser.project, publish-samples it uses -n for destination project name # for publish it uses -r for destination project name # but parser.site uses -r for site-content-url def set_project_r_arg(parser): - parser.add_argument( - "--project", - "-r", - dest="project_name", - default="", - help="The name of the project.", - ) + parser.add_argument("--project", "-r", dest="project_name", default="", help=_("tabcmd.options.project")) return parser def set_project_n_arg(parser): - parser.add_argument( - "-n", - "--project", - dest="project_name", - default="", - help="The name of the project.", - ) + parser.add_argument("-n", "--project", dest="project_name", default="", help=_("tabcmd.options.project")) return parser def set_project_arg(parser): - parser.add_argument("--project", dest="project_name", default="", help="The name of the project.") + parser.add_argument("--project", dest="project_name", default="", help=_("tabcmd.options.project")) + return parser + + +def set_resource_url_arg(parser): + parser.add_argument("--url", help=_("tabcmd.options.resource_url")) return parser def set_ds_xor_wb_options(parser): target_type_group = parser.add_mutually_exclusive_group(required=False) - target_type_group.add_argument("-d", "--datasource", action="store_true", help="The name of the target datasource.") - target_type_group.add_argument("-w", "--workbook", action="store_true", help="The name of the target workbook.") + target_type_group.add_argument("-d", "--datasource", action="store_true", help=_("tabcmd.options.datasource")) + target_type_group.add_argument("-w", "--workbook", action="store_true", help=_("tabcmd.options.workbook")) return parser # pass arguments for either --datasource or --workbook def set_ds_xor_wb_args(parser, url=False): target_type_group = parser.add_mutually_exclusive_group(required=True) - target_type_group.add_argument("-d", "--datasource", help="The name of the target datasource.") - 
target_type_group.add_argument("-w", "--workbook", help="The name of the target workbook.") + target_type_group.add_argument("-d", "--datasource", help=_("tabcmd.options.datasource")) + target_type_group.add_argument("-w", "--workbook", help=_("tabcmd.options.workbook")) if url: # -U conflicts with --username, they are not case sensitive target_type_group.add_argument("--url", help=_("deleteextracts.options.url")) @@ -182,7 +166,7 @@ def set_ds_xor_wb_args(parser, url=False): def set_description_arg(parser): - parser.add_argument("-d", "--description", help="Specifies a description for the item.") + parser.add_argument("-d", "--description", help=_("tabcmd.content.description")) return parser @@ -192,7 +176,7 @@ def set_site_status_arg(parser): "--status", choices=["ACTIVE", "SUSPENDED"], type=str.upper, - help="Set to ACTIVE to activate a site, or to SUSPENDED to suspend a site.", + help=_("editsite.options.status"), ) return parser @@ -201,12 +185,8 @@ def set_site_status_arg(parser): # just let both commands use either of them def set_site_id_args(parser): site_id = parser.add_mutually_exclusive_group() - site_id.add_argument("--site-id", help="Used in the URL to uniquely identify the site.") - site_id.add_argument( - "-r", - "--url", - help="Used in URLs to specify the site. 
Different from the site name.", - ) + site_id.add_argument("--site-id", help=_("tabcmd.content.site_id")) + site_id.add_argument("-r", "--url", help=_("tabcmd.content.site_id")) return parser @@ -215,14 +195,14 @@ def set_common_site_args(parser): parser = set_site_id_args(parser) - parser.add_argument("--user-quota", type=int, help="Maximum number of users that can be added to the site.") + parser.add_argument("--user-quota", type=int, help=_("createsite.options.user-quota")) set_site_mode_option(parser) parser.add_argument( "--storage-quota", type=int, - help="In MB, the amount of data that can be stored on the site.", + help=_("createsite.options.storage-quota"), ) encryption_modes = ["enforced", "enabled", "disabled"] @@ -230,32 +210,30 @@ def set_common_site_args(parser): "--extract-encryption-mode", choices=encryption_modes, type=case_insensitive_string_type(encryption_modes), - help="The extract encryption mode for the site can be enforced, enabled or disabled. " - "[N/a on Tableau Cloud: encryption mode is always enforced] ", + help=_("editsite.options.extract_encryption_mode"), ) parser.add_argument( "--run-now-enabled", choices=["true", "false"], - help="Allow or deny users from running extract refreshes, flows, or schedules manually.", + help=_("editsite.options.run_now_enabled"), ) return parser def set_site_mode_option(parser): - site_help = "Allows or denies site administrators the ability to add users to or remove users from the site." 
site_group = parser.add_mutually_exclusive_group() site_group.add_argument( "--site-mode", dest="site_admin_user_management", action="store_true", - help=site_help, + help=_("createsite.options.site-mode"), ) site_group.add_argument( "--no-site-mode", dest="site_admin_user_management", action="store_false", - help=site_help, + help=_("createsite.options.site-mode"), ) @@ -264,121 +242,71 @@ def set_site_detail_option(parser): parser.add_argument( "--get-extract-encryption-mode", action="store_true", - help="Include the extract encryption mode for each site.", + help=_("listsites.options.get_extract_encryption_mode"), ) -# export --- mmmaaaannnyyyy options -def set_filename_arg(parser, description=_("get.options.file")): - parser.add_argument("-f", "--filename", help=description) +def set_destination_filename_arg(parser): + parser.add_argument("-f", "--filename", help=_("get.options.file")) def set_publish_args(parser): - parser.add_argument("-n", "--name", help="Name to publish the new datasource or workbook by.") + parser.add_argument("-n", "--name", help=_("publish.options.name")) creds = parser.add_mutually_exclusive_group() - creds.add_argument("--oauth-username", help="The email address of a preconfigured OAuth connection") - creds.add_argument( - "--db-username", - help="Use this option to publish a database user name with the workbook, data source, or data extract.", - ) - parser.add_argument("--save-oauth", action="store_true", help="Save embedded OAuth credentials in the datasource") + creds.add_argument("--oauth-username", help=_("publish.options.oauth-username")) + creds.add_argument("--db-username", help=_("publish.options.db-username")) + parser.add_argument("--save-oauth", action="store_true", help=_("publish.options.save-oauth")) - parser.add_argument( - "--db-password", - help="publish a database password with the workbook, data source, or extract", - ) - parser.add_argument( - "--save-db-password", - action="store_true", - help="Stores the provided 
database password on the server.", - ) + parser.add_argument("--db-password", help=_("publish.options.db-password")) + parser.add_argument("--save-db-password", action="store_true", help=_("publish.options.save-db-password")) - parser.add_argument( - "--tabbed", - action="store_true", - help="When a workbook with tabbed views is published, each sheet becomes a tab that viewers can use to \ - navigate through the workbook", - ) - parser.add_argument( - "--disable-uploader", - action="store_true", - help="[DEPRECATED - has no effect] Disable the incremental file uploader.", - ) - parser.add_argument("--restart", help="[DEPRECATED - has no effect] Restart the file upload.") - parser.add_argument( - "--encrypt-extracts", - action="store_true", - help="Encrypt extracts in the workbook, datasource, or extract being published to the server. " - "[N/a on Tableau Cloud: extract encryption is controlled by Site Admin]", - ) + parser.add_argument("--tabbed", action="store_true", help=_("tabcmd.publish.options.tabbed.detailed")) + parser.add_argument("--disable-uploader", action="store_true", help=_("tabcmd.options.deprecated")) + + parser.add_argument("--restart", help=_("publish.options.restart")) + parser.add_argument("--encrypt-extracts", action="store_true", help=_("publish.options.encrypt_extracts")) parser.add_argument( "--skip-connection-check", action="store_true", help="Skip connection check: do not validate the workbook/datasource connection during publishing", ) - # These two only apply for a workbook, not a datasource thumbnails = parser.add_mutually_exclusive_group() - thumbnails.add_argument( - "--thumbnail-username", - help="If the workbook contains user filters, the thumbnails will be generated based on what the " - "specified user can see. 
Cannot be specified when --thumbnail-group option is set.", - ) - thumbnails.add_argument( - "--thumbnail-group", - help="If the workbook contains user filters, the thumbnails will be generated based on what the " - "specified group can see. Cannot be specified when --thumbnail-username option is set.", - ) + thumbnails.add_argument("--thumbnail-username", help=_("publish.options.thumbnail-username")) + thumbnails.add_argument("--thumbnail-group", help=_("publish.options.thumbnail-groupname")) - parser.add_argument("--use-tableau-bridge", action="store_true", help="Refresh datasource through Tableau Bridge") + parser.add_argument("--use-tableau-bridge", action="store_true", help=_("tabcmd.refresh.options.bridge")) # these two are used to publish an extract to an existing data source def set_append_replace_option(parser): append_group = parser.add_mutually_exclusive_group() - append_group.add_argument( - "--append", - action="store_true", - help="Set to true to append the data being published to an existing data source that has the same name. " - "The default behavior is to fail if the data source already exists. " - "If append is set to true but the data source doesn't already exist, the operation fails.", - ) + append_group.add_argument("--append", action="store_true", help=_("tabcmd.publish.options.append.detailed")) - # what's the difference between this and 'overwrite'? - # This one replaces the data but not the metadata - append_group.add_argument( - "--replace", - action="store_true", - help="Use the extract file being published to replace data in the existing data source. 
The default " - "behavior is to fail if the item already exists.", - ) + # This will keep the metadata of the existing data source and replace the data in the extract file + # This is meant for when a) the local file is an extract b) the server item is an existing data source + append_group.add_argument("--replace", action="store_true", help=_("publish.options.replace")) -# this is meant to be publish the whole thing on top of what's there +# This will overwrite the metadata and data of the existing content def set_overwrite_option(parser): parser.add_argument( "-o", "--overwrite", action="store_true", - help="Overwrites the workbook, data source, or data extract if it already exists on the server. The default " - "behavior is to fail if the item already exists.", + help=_("publish.options.overwrite"), ) # refresh-extracts def set_incremental_options(parser): - parser.add_argument("--incremental", action="store_true", help="Runs the incremental refresh operation.") + parser.add_argument("--incremental", action="store_true", help=_("refreshextracts.options.incremental")) return parser def set_sync_wait_options(parser): - parser.add_argument( - "--synchronous", - action="store_true", - help="Adds the full refresh operation to the queue used by the Backgrounder process, to be run as soon as a \ - Backgrounder process is available. 
The program will wait until the job has finished or the timeout has been reached.", - ) + parser.add_argument("--synchronous", action="store_true", help=_("refreshextracts.options.synchronous")) return parser @@ -387,12 +315,12 @@ def set_calculations_options(parser): calc_group.add_argument( "--addcalculations", action="store_true", - help="[Not implemented] Add precalculated data operations in the extract data source.", + help=_("tabcmd.options.deprecated"), ) calc_group.add_argument( "--removecalculations", action="store_true", - help="[Not implemented] Remove precalculated data in the extract data source.", + help=_("tabcmd.options.deprecated"), ) return calc_group @@ -400,24 +328,15 @@ def set_calculations_options(parser): # TODO below # these are not used in any Online operations, on the backburner - # edit-domain: none of these are used in other commands -def set_domain_arguments(parser): - parser.add_argument( - "--id", - help="The ID of domain to change. To get a list of domain IDs, use use listdomains.", - ) - parser.add_argument("--name", help="The new name for the domain.") - parser.add_argument("--nickname", help="The new nickname for the domain.") - return parser - - -# reset-openid-sub -def set_target_users_arg(parser): - target_users_group = parser.add_mutually_exclusive_group() - target_users_group.add_argument("--target-username", help="Clears sub value for the specified individual user.") - target_users_group.add_argument("--all", action="store_true", help="Clears sub values for all users.") - return parser +# def set_domain_arguments(parser): +# parser.add_argument( +# "--id", +# help="The ID of domain to change. 
To get a list of domain IDs, use use listdomains.", +# ) +# parser.add_argument("--name", help="The new name for the domain.") +# parser.add_argument("--nickname", help="The new nickname for the domain.") +# return parser # set setting @@ -427,58 +346,19 @@ def set_target_users_arg(parser): # sync-group -license_modes = ["on-login", "on-sync"] - - -def set_update_group_args(parser): - parser.add_argument( - "--grant-license-mode", - choices=license_modes, - type=case_insensitive_string_type(license_modes), - help="Specifies whether a role should be granted on sign in. ", - ) - parser.add_argument( - "--overwritesiterole", - action="store_true", - help="Allows a user’s site role to be overwritten with a less privileged one when using --role.", - ) - return parser - - -def set_upgrade_stop_option(parser): - parser.add_argument( - "--stop", - action="store_true", - help="When specified, stops the in progress Upgrade Thumbnails job.", - ) - return parser - - -# validate-idp-metadata -# TODO not sure how these space-separated lists will work -def set_validate_idp_options(parser): - parser.add_argument( - "--digest-algorithms", - metavar="", - help="A space-separated list of digest algorithms. Legal values are sha1and sha256. \ - If not specified, server uses values from server configuration setting, \ - wgserver.saml.blocklisted_digest_algorithms.", - ) - parser.add_argument( - "--min-allowed-elliptic-curve-size", - metavar="", - help="If not specified, server uses values from server configuration setting, \ - wgserver.saml.min_allowed.elliptic_curve_size.", - ) - parser.add_argument( - "--min-allowed-rsa-key-size", - metavar="", - help="If not specified, server uses values from server configuration setting, \ - wgserver.saml.min_allowed.rsa_key_size.", - ) - parser.add_argument( - "--site-names", - metavar="", - help="A space-separated list of site names on which to perform certificate validation. 
\ - If not specified, then all sites are inspected.", - ) +# license_modes = ["on-login", "on-sync"] + + +# def set_update_group_args(parser): +# parser.add_argument( +# "--grant-license-mode", +# choices=license_modes, +# type=case_insensitive_string_type(license_modes), +# help="Specifies whether a role should be granted on sign in. ", +# ) +# parser.add_argument( +# "--overwritesiterole", +# action="store_true", +# help="Allows a user’s site role to be overwritten with a less privileged one when using --role.", +# ) +# return parser diff --git a/tabcmd/execution/parent_parser.py b/tabcmd/execution/parent_parser.py index 15216631..495f8ba2 100644 --- a/tabcmd/execution/parent_parser.py +++ b/tabcmd/execution/parent_parser.py @@ -23,7 +23,7 @@ def parent_parser_with_global_options(): parser = argparse.ArgumentParser(usage=argparse.SUPPRESS, add_help=False) - parser._optionals.title = strings[0] + parser._optionals.title = _("tabcmdparser.global.behaviors") formatting_group1 = parser.add_mutually_exclusive_group() formatting_group1.add_argument( @@ -40,15 +40,12 @@ def parent_parser_with_global_options(): ) auth_options = parser.add_mutually_exclusive_group() - auth_options.add_argument("--token-name", default=None, metavar="", help=strings[13]) + auth_options.add_argument("--token-name", default=None, metavar="", help=_("tabcmd.options.token_name")) auth_options.add_argument("-u", "--username", default=None, metavar="", help=_("session.options.username")) secret_values = parser.add_mutually_exclusive_group() secret_values.add_argument( - "--token-value", - default=None, - metavar="", - help=strings[12], + "--token-value", default=None, metavar="", help=_("tabcmd.options.token_value") ) secret_values.add_argument( "-p", "--password", default=None, metavar="", help=_("session.options.password") @@ -56,7 +53,7 @@ def parent_parser_with_global_options(): secret_values.add_argument( "--password-file", default=None, metavar="", help=_("session.options.password-file") ) 
- secret_values.add_argument("--token-file", default=None, metavar="", help=strings[11]) + secret_values.add_argument("--token-file", default=None, metavar="", help=_("tabcmd.options.token_file")) formatting_group3 = parser.add_mutually_exclusive_group() formatting_group3.add_argument("--no-prompt", action="store_true", help=_("session.options.no-prompt")) @@ -101,7 +98,7 @@ def parent_parser_with_global_options(): parser.add_argument( "--continue-if-exists", action="store_true", # default behavior matches old tabcmd - help=strings[9], # kind of equivalent to 'overwrite' in the publish command + help=_("tabcmd.options.conflicts"), # kind of equivalent to 'overwrite' in the publish command ) parser.add_argument( @@ -115,7 +112,7 @@ def parent_parser_with_global_options(): "--language", choices=["de", "en", "es", "fr", "it", "ja", "ko", "pt", "sv", "zh"], type=str.lower, # coerce input to lowercase to act case insensitive - help=strings[10], + help=_("tabcmd.options.language.detailed"), ) parser.add_argument( @@ -124,15 +121,15 @@ def parent_parser_with_global_options(): choices=["TRACE", "DEBUG", "INFO", "ERROR"], type=str.upper, # coerce input to uppercase to act case insensitive default="info", - help=strings[8], + help=_("tabcmd.options.log"), ) parser.add_argument( "-v", "--version", action="version", - version=strings[6] + " v" + version + "\n \n", - help=strings[7], + version=f"{_('tabcmd.name')} v{version}\n \n", + help=_("version.description"), ) parser.add_argument( @@ -147,20 +144,22 @@ def parent_parser_with_global_options(): class ParentParser: # Ref https://docs.python.org/3/library/argparse.html - """Parser that will be inherited by all commands. Contains - authentication and logging level setting""" + """Parser that will be inherited by all commands. 
Contains authentication and logging level setting""" def __init__(self): self.global_options = parent_parser_with_global_options() self.root = argparse.ArgumentParser( - prog="tabcmd", description=strings[15], parents=[self.global_options], epilog=strings[2] + prog="tabcmd", + description="tabcmd -- " + _("tabcmd.howto"), + parents=[self.global_options], + epilog=_("tabcmdparser.help.link"), ) - self.root._optionals.title = strings[1] + self.root._optionals.title = _("tabcmdparser.global.connections") # https://stackoverflow.com/questions/7498595/python-argparse-add-argument-to-multiple-subparsers self.subparsers = self.root.add_subparsers( - title=strings[3], - description=strings[4], - metavar=strings[5], # instead of printing the list of choices + title=_("tabcmdparser.help.commands"), + description=_("tabcmdparser.help.specific"), + metavar="{ [command args]}", # instead of printing the list of choices ) def get_root_parser(self): @@ -176,7 +175,7 @@ def include(self, command): additional_parser = self.subparsers.add_parser( command.name, help=command.description, parents=[self.global_options] ) - additional_parser._optionals.title = strings[1] + additional_parser._optionals.title = _("tabcmdparser.global.connections") # This line is where we actually set each parser to call the correct command additional_parser.set_defaults(func=command) command.define_args(additional_parser) @@ -184,8 +183,10 @@ def include(self, command): # Help isn't added like the others because it has to have access to the rest, to get their args def include_help(self): - additional_parser = self.subparsers.add_parser("help", help=strings[14], parents=[self.global_options]) - additional_parser._optionals.title = strings[1] + additional_parser = self.subparsers.add_parser( + "help", help=_("tabcmdparser.help.description"), parents=[self.global_options] + ) + additional_parser._optionals.title = _("tabcmdparser.global.connections") additional_parser.set_defaults(func=Help(self)) @@ -198,28 
+199,5 @@ def __init__(self, _parser: ParentParser): def run_command(self, args): logger = log(__name__, "info") - logger.info(strings[6] + " " + version + "\n") + logger.info(f"{_('tabcmd.name')} {version}\n") logger.info(self.parser.root.format_help()) - - -strings = [ - "global behavioral arguments", # 0 - global_behavior_args - "global connection arguments", # 1 - global_conn_args - "For more help see https://tableau.github.io/tabcmd/", # 2 - for_more_help - "list of tabcmd commands", # 3 - "For help on a specific command use 'tabcmd -h'.", # 4 - "{ [command args]}", # 5 - "Tableau Server Command Line Utility", # 6 - "Show version information and exit.", # 7 - "Use the specified logging level. The default level is INFO.", # 8 - "Treat resource conflicts as item creation success e.g project already exists", # 9 - "Set the language to use. Exported data will be returned in this lang/locale.\n \ - If not set, the client will use your computer locale, and the server will use \ - your user account locale", # 10 - "Read the Personal Access Token from a file.", # 11 - "Use the specified Tableau Server Personal Access Token. Requires --token-name to be set.", # 12 - "The name of the Tableau Server Personal Access Token. 
If using a token to sign in,\ - this is required at least once to begin session.", # 13 - "Show message listing commands and global options, then exit", # 14 - "tabcmd -- Run a specific command", # 15 -] diff --git a/tabcmd/locales/en/shared_wg_en.properties b/tabcmd/locales/en/shared_wg_en.properties index 4e7b9443..de5bb57e 100644 --- a/tabcmd/locales/en/shared_wg_en.properties +++ b/tabcmd/locales/en/shared_wg_en.properties @@ -1,52 +1,8 @@ -permissions.labels.capabilities.read=View: -permissions.labels.capabilities.write=Overwrite: -permissions.labels.capabilities.write.legacy=Save: -permissions.labels.capabilities.delete=Delete: -permissions.labels.capabilities.filter=Filter: -permissions.labels.capabilities.add_tag=Add Tag: -permissions.labels.capabilities.add_favorite=Add Favorite: -permissions.labels.capabilities.add_comment=Add Comments: -permissions.labels.capabilities.add_comment.legacy=Add Comment: -permissions.labels.capabilities.view_comments=View Comments: -permissions.labels.capabilities.vud=Download Full Data: -permissions.labels.capabilities.export_image=Download Image/PDF: -permissions.labels.capabilities.export_data=Download Summary Data: -permissions.labels.capabilities.export_xml=Download/Save A Copy: -permissions.labels.capabilities.export_xml.legacy=Download/Save As: -permissions.labels.capabilities.save_customized_view=Save Customized View: -permissions.labels.capabilities.share_view=Share Customized: -permissions.labels.capabilities.stateful_url=Permalink: -permissions.labels.capabilities.change_hierarchy=Move: -permissions.labels.capabilities.change_permissions=Set Permissions: -permissions.labels.capabilities.exclude=Exclude: -permissions.labels.capabilities.keep_only=Keep Only: -permissions.labels.capabilities.select=Select Marks: -permissions.labels.capabilities.tooltip=View Tooltips: -permissions.labels.capabilities.highlight=Legend Highlighting: -permissions.labels.capabilities.url_link=Link to Exernal Urls: 
-permissions.labels.capabilities.administrator=Administrator: -permissions.labels.capabilities.content_admin=Content Administrator: -permissions.labels.capabilities.create_groups=Create Groups: -permissions.labels.capabilities.rename=Rename: -permissions.labels.capabilities.transfer_ownership=Transfer Ownership: -permissions.labels.capabilities.project_leader=Project Leader: -permissions.labels.capabilities.publish=Publish: -permissions.labels.capabilities.connect=Connect: -permissions.labels.capabilities.draw=Drawing: -permissions.labels.capabilities.web_authoring=Web Edit: -permissions.labels.capabilities.xml_save_as=Save As: -permissions.labels.capabilities.create_refresh_metrics=Create/Refresh Metrics: -permissions.labels.capabilities.explain_data=Run Explain Data: -permissions.labels.capabilities.vizql_data_api_access=API Access: - permissions.labels.roles.wb_view=View permissions.labels.roles.wb_explore=Explore permissions.labels.roles.wb_publish=Publish permissions.labels.roles.wb_administer=Administer -permissions.labels.roles.ds_view=View -permissions.labels.roles.ds_explore=Explore -permissions.labels.roles.ds_publish=Publish -permissions.labels.roles.ds_administer=Administer + permissions.labels.roles.viewer=Viewer permissions.labels.roles.interactor=Interactor permissions.labels.roles.editor=Editor @@ -62,96 +18,20 @@ permissions.labels.roles.none=None formats.messages.disk_space.in_mb={0} MB formats.messages.disk_space.in_kb={0} KB -# Publishing-related errors and strings -errors.argument.flow_description.too_long=Flow description is too long ({0} characters). It must be shorter than {1} characters:\n{2} -errors.argument.workbook_name.too_long=Workbook name is too long ({0} characters). It must be shorter than {1} characters:\n{2} -errors.argument.workbook_name.empty=Workbook name is empty. -errors.argument.workbook_description.too_long=Workbook description is too long ({0} characters). 
It must be shorter than {1} characters:\n{2} -errors.argument.worksheet_name.too_long=Worksheet name is too long ({0} characters). It must be shorter than {1} characters:\n{2} -errors.argument.datasource_name.too_long=Datasource name is too long ({0} characters). It must be shorter than {1} characters:\n{2} -errors.argument.datasource_name.empty=Datasource name is empty. -errors.argument.datasource_description.too_long=Datasource description is too long ({0} characters). It must be shorter than {1} characters:\n{2} -errors.argument.dataconnection_column.too_long=DataConnection column {0} is too long ({1} characters). It must be shorter than {2} characters:\n{3} -errors.argument.tag.too_long=Tag is too long ({0} characters). It must be shorter than {1} characters:\n{2} -errors.argument.workbook_file.missing_or_empty=Workbook file is missing or empty. -errors.argument.file.size.too_large=File {0} is too large. Files larger than {1} MB decompressed size are not permitted. Please create an extract to proceed with publishing. -errors.argument.twb_or_tds_file.missing=twb or tds file is missing from the archive. -errors.argument.internal=Internal server error. Missing or invalid argument. -errors.argument.thumbnail.invalid_format=Workbook thumbnail format is invalid. Must be PNG with resolution of 300x300 or less. -errors.html_403.disabled_resource.workbook=Sorry, that workbook is disabled. Contact your administrator. -errors.html_404.missing_or_unauthorized_resource.project=Sorry, the project you were looking for does not exist, or you do not have permission to see that project. -errors.html_404.missing_or_unauthorized_resource.personal_space=Sorry, the personal space you were looking for does not exist, or you do not have permission to see it. -errors.html_404.missing_or_unauthorized_resource.datasource=Sorry, the datasource you were looking for does not exist. 
-errors.labels.formatted_request_id=Request ID: {0} -errors.links.delete_datasources=Delete Datasources -errors.links.delete_workbooks=Delete Workbooks -errors.permissions.publish.datasource.licensing=You must be an ''Interactor'' to publish a data source. -errors.permissions.publish.datasource.overwrite=A data source named "{0}" already exists in project "{1}". You do not have permissions to overwrite it. -errors.permissions.publish.datasource.overwrite_personal_space=A data source named "{0}" already exists in Personal Space. You do not have permissions to overwrite it. -errors.permissions.publish.datasource.overwrite_short=A data source named "{0}" already exists in project "{1}". -errors.permissions.publish.datasource.overwrite_short_personal_space=A datasource named "{0}" already exists in Personal Space. -errors.permissions.publish.datasource.non_existent=Data source "{0}" does not exist. -errors.permissions.publish.datasource.general=Sorry, you do not have permission to publish to this datasource. -errors.permissions.publish.project.explicit_publisher_detail=You do not have the Publishing right. -errors.permissions.publish.project.general=Sorry, you do not have permission to publish to this project. -errors.permissions.publish.project.non_existent=Project "{0}" does not exist. -errors.permissions.publish.project.not_writable=You do not have permission to write to the project "{0}". -errors.permissions.publish.project.unspecified=You must specify a project -errors.permissions.publish.workbook.disabled=The workbook "{0}" is disabled. -errors.permissions.publish.workbook.general=A workbook with this name exists in the project you are publishing to. To publish, rename your workbook. -errors.permissions.publish.workbook.general_personal_space=A workbook with this name exists in the Personal Space you are publishing to. To publish, rename your workbook. -errors.permissions.publish.workbook.overwrite=A workbook named "{0}" already exists in project "{1}". 
You do not have permissions to overwrite it. -errors.permissions.publish.workbook.overwrite_personal_space=A workbook named "{0}" already exists in Personal Space. You do not have permissions to overwrite it. -errors.permissions.publish.workbook.overwrite_short=A workbook named "{0}" already exists in project "{1}". -errors.permissions.publish.workbook.overwrite_short_personal_space=A workbook named "{0}" already exists in Personal Space. errors.reportable.impersonation.group_and_user=Only specify one of ''impersonate_username'' or ''impersonate_groupname'', not both. -errors.reportable.impersonation.user_not_found=User ''{0}'' does not exist. -errors.reportable.impersonation.group_not_found=Group ''{0}'' does not exist. -# errors.public.validation.hasextract is copied from the C++ file codegen strings.data IDS_PUBLIC_VALIDATION_ERROR_HASEXTRACT -errors.public.validation.hasextract=Workbooks saved to Tableau Public must use active Tableau extracts. Right-click the data source and select Use Extract before publishing. The data source, ''{0}'', is not an active extract. -errors.public.validation.storage_quota_exceeded=The content you are trying to publish will exceed the maximum capacity of your account. Please delete some contents to free up space and try again. -errors.public.validation.invalid_archive_file=The workbook you are attempting to publish is not a valid workbook. -errors.publish.dataserver_datasource_not_found=Datasource ''{0}'' not found for workbook ''{1}''. -errors.publish.datasource_update_mode=Unknown update mode: ''{0}''. -errors.publish.fileupload.invalid_checksum=Invalid checksum value provided. -errors.publish.fileupload.invalid_hash_algorithm=Hash algorithm is not supported or disabled. -errors.publish.fileupload.invalid_offset=Append to file upload with an offset that does not match recorded file size. -errors.publish.fileupload.concurrent_write=Could not acquire exclusive lock on the file as it is locked by another upload. 
-errors.publish.fileupload.flushing_file_buffer=Failed to flush file buffers during upload. -errors.publish.fileupload.read_or_write=Failed to read from client or write to the uploaded file. -errors.publish.fileupload.concurrent_update=A content with the same name is currently being uploaded or modified. Wait until the task completes before you try again. -errors.publish.workbook.restricted=The workbook contains connections types that are not allowed on this server. -errors.publish.datasource.concurrent_update=A data source with the same name is currently being published or modified. Wait until the task completes before you try again. -errors.publish.datasource.restricted=The data source contains a connection type that is not allowed on this server. -errors.publish.datasource.connection_check={0} failed to establish a connection to your datasource. -errors.publish.file.connection.unauthorized=This flow includes file input connections that are blocked by your Site Administrator. To publish flows, all input connections must be Tableau extracts already published on Tableau Cloud. -errors.publish.flow.connection_check=Failed to publish the flow "{0}". The server can''t connect to the data source type: "{1}". Verify that the data source is supported and that the driver or connector plugin for the data source is installed. -errors.publish.workbook.requested_extract_encryption_on_server_with_disabled_encryption=The workbook cannot be published with an encrypted extract. Extract encryption is disabled on this server. You must publish the workbook with an unencrypted extract. -errors.publish.datasource.requested_extract_encryption_on_server_with_disabled_encryption=The data source cannot be published as an encrypted extract. Extract encryption is disabled on this server. You must publish the extract unencrypted. -errors.publish.workbook.requested_extract_encryption_on_site_with_disabled_encryption=The workbook cannot be published with an encrypted extract. 
The site administrator has disabled extract encryption on this site. You must publish the workbook with an unencrypted extract. -errors.publish.datasource.requested_extract_encryption_on_site_with_disabled_encryption=The data source cannot be published as an encrypted extract. The site administrator has disabled extract encryption on this site. You must publish the extract unencrypted. -errors.publish.workbook.requested_no_extract_encryption_on_site_with_enforced_encryption=The workbook cannot be published with an unencrypted extract. The site administrator has enforced extract encryption on this site. You must publish the workbook and specify extract encryption. -errors.publish.datasource.requested_no_extract_encryption_on_site_with_enforced_encryption=The data source extract cannot be published unencrypted. The site administrator has enforced extract encryption on this site. You must publish the extract encrypted. + errors.publish.has_unfinished_extract_creation_job=This resource cannot be published because there are pending or unfinished extract creation jobs related to it. Please wait or cancel those jobs then try again. errors.publishing.storage_quota_exceeded.message=Site storage quota exceeded. errors.publishing.request_size_exceeded=This file cannot be published because the request size is {0} bytes which exceeds byte size limit by {1} bytes. errors.publishing.request_size_unknown=The request size could not be determined. -errors.site.quota_exceeded=The content you are trying to upload will exceed the site''s capacity by {0}. -errors.personal_space.quota_exceeded=This content exceeds your Personal Space capacity by {0}. -errors.web_authoring.workbook_deleted=The workbook ''{0}'' was deleted. Please choose ''Save As'' to save as new workbook. -errors.web_authoring.workbook_changed=Another user has edited and saved this workbook since you began editing. Therefore, you can no longer update the original workbook. 
Use Save As to create a new workbook with your changes. labels.publish.embed_credentials=Embed password for data source errors.publish.workbook.google_no_embedded_credentials=This workbook contains a {0} data source which requires embedded credentials. To save as another workbook, select "{1}". errors.publish.datasource.google_no_embedded_credentials=This data source contains a {0} connection which requires embedded credentials. Please publish this data source from Tableau Desktop with embedded credentials. errors.publish.datasource.federated_oauth_datasource_failed=Credentials for one or more connections in your data source have expired. Delete the saved credentials for this data source on your Account Settings page on Tableau Server or Tableau Cloud, and then connect to the data source again. -errors.publish.workbook.missing_oauth_credential=The credential ''{0}'' was not found or cannot be used to connect to the data source or workbook. -errors.publish.version_incompatible.message=Desktop and server versions are incompatible. -errors.publish.version_incompatible.details=Desktop version ''{0}'', Server document version ''{1}''. -errors.publish.datasource.managed_keychain.unique_cv_suggestions=Publish failed because of a conflict. Please resubmit your change. errors.publish.datasource.not.found=Could not find this datasource on Server. -errors.publish.extracts.disabled=Save for extract-based, archive datasources not supported yet. +errors.publish.workbook.missing_oauth_credential=The credential ''{0}'' was not found or cannot be used to connect to the data source or workbook. + errors.publish.datasource.timeout=Publish timed out after {0} seconds. Please try to use Tableau Desktop to publish this datasource. -errors.publish.data_role.permission_not_null=Cannot set explicit permissions for datasources of type data role. errors.download.extracts.timeout=Publish timed out after {0} seconds because this datasource has a large extract file. 
Please try to use Tableau Desktop to publish this datasource. errors.publish.datasource.invalid_extract_update_time=Invalid extractUpdateTime specified: {0} errors.publish.async.serialization=Could not serialize publishing job status error @@ -159,285 +39,20 @@ errors.publish.only_extract_files_are_allowed.message=Can''t Publish to Tableau errors.publish.only_extract_files_are_allowed.details=The file {0} cannot be uploaded because site administrator has blocked files of this type. Convert it to an extract and try again. errors.publish.validation.invalid_zip_file=The workbook or datasource archive you are attempting to publish is not valid errors.permissions.view.datasources=You must be an ''Interactor'' to view the list of data sources. - -# oauth related errors -errors.oauth.invalid_oauth_class=''{0}'' is not a valid OAuth connection type. - -errors.xmlapi.bad_request=Bad request -errors.xmlapi.payload_too_large=Payload too large -errors.xmlapi.unauthorized=Not authorized -errors.xmlapi.forbidden=Not enough permissions -errors.xmlapi.system_user_not_found=User account locked or invalid. -errors.xmlapi.site_user_not_found=This user account is not active. For help, contact your {0} administrator. errors.xmlapi.not_found=Item not found -errors.xmlapi.concurrent_update=Concurrent update errors.xmlapi.internal_server_error=Internal server error -errors.xmlapi.illegal_state=Illegal state -errors.xmlapi.invalid_parameter=Invalid parameter errors.xmlapi.already_exists=Already exists -errors.xmlapi.site_suspended=This site has been suspended. You must be a System Administrator to sign in. -errors.xmlapi.site_locked=This site has been locked. Signing in is not allowed at this time. -errors.xmlapi.update_desktop=To open workbooks created by other authors, please download the latest version of Tableau Desktop Public Edition from http://public.tableau.com/ -errors.xmlapi.throttled.summary=Too many requests -errors.xmlapi.throttled.detail=Too many requests for ''{0}''. 
Please retry after {1}. -errors.xmlapi.personal_space_quota_exceeded.summary=Can''t Publish to Personal Space -errors.xmlapi.personal_space_quota_exceeded.detail=Delete or move existing Personal Space content then try publishing again. - -datasource.publish.check.unpublishable=Could not publish data source. -datasource.publish.check.unpublishable.table_extension_extract_required=Table extensions cannot be published with connections that require an extract. -datasource.publish.check.publishable.extract_required=Requires creating an extract on publish. -datasource.publish.check.publishable.credentials_required={1} will temporarily access the credentials provided for ''{0}'' to confirm it can maintain a live data connection. -datasource.publish.check.publishable.server_unreachable={1} cannot reach ''{0}''. Publishing will create an extract. - -publish.bundles.workbook.subscription.subject=Your workbook {0} is ready for you. -publish.bundles.workbook.subscription.message=Your Salesforce data was added to the workbook. Go to your {0} project to get started. 
-extracts.status.finished.upload.workbook=Finished upload of extracts (new extract id:{0}) for Workbook ''{1}'' {2} -extracts.status.finished.refresh.workbook=Finished refresh of extracts (new extract id:{0}) for Workbook ''{1}'' {2} -extracts.status.finished.increment.workbook=Finished increment of extracts (new extract id:{0}) for Workbook ''{1}'' {2} -extracts.status.finished.encrypt.workbook=Finished encryption of extracts (new extract id:{0}) for Workbook ''{1}'' -extracts.status.finished.decrypt.workbook=Finished decryption of extracts (new extract id:{0}) for Workbook ''{1}'' -extracts.status.finished.rekey.workbook=Finished reencryption of extracts (new extract id:{0}) for Workbook ''{1}'' -extracts.status.finished.create.workbook=Finished creation of extracts (new extract id:{0}) for workbook ''{1}'' {2} -extracts.status.finished.remove.workbook=Finished change extract (data source id:{0}) to live for workbook ''{1}'' -extracts.status.finished.upload.datasource=Finished upload of extracts (new extract id:{0}) for Data Source ''{1}'' {2} -extracts.status.finished.refresh.datasource=Finished refresh of extracts (new extract id:{0}) for Data Source ''{1}'' {2} -extracts.status.finished.increment.datasource=Finished increment of extracts (new extract id:{0}) for Data Source ''{1}'' {2} -extracts.status.finished.append.datasource=Finished append of extracts (new extract id:{0}) for Data Source ''{1}'' {2} -extracts.status.finished.replace.datasource=Finished replace of extracts (new extract id:{0}) for Data Source ''{1}'' {2} -extracts.status.finished.encrypt.datasource=Finished encryption of extracts (new extract id:{0}) for Data Source ''{1}'' -extracts.status.finished.decrypt.datasource=Finished decryption of extracts (new extract id:{0}) for Data Source ''{1}'' -extracts.status.finished.rekey.datasource=Finished reencryption of extracts (new extract id:{0}) for Data Source ''{1}'' -extracts.status.finished.encrypt.flow=Finished encryption of extracts for 
flow ''{0}'' -extracts.status.finished.decrypt.flow=Finished decryption of extracts for flow ''{0}'' -extracts.status.finished.rekey.flow=Finished reencryption of extracts for flow ''{0}'' -extracts.status.finished.encrypt.flowdraft=Finished encryption of extracts for flow draft ''{0}'' -extracts.status.finished.decrypt.flowdraft=Finished decryption of extracts for flow draft ''{0}'' -extracts.status.finished.rekey.flowdraft=Finished reencryption of extracts for flow draft ''{0}'' -extracts.status.finished.create.datasource=Finished creation of extracts (new extract id:{0}) for data source ''{1}'' {2} -extracts.status.finished.remove.datasource=Finished change extract to live for data source ''{1}'' -extracts.status.finished.create.vconn=Finished creation of extracts (new extract id:{0}) for Virtual Connection ''{1}'' -extracts.status.finished.refresh.vconn=Finished refresh of extracts (new extract id:{0}) for Virtual Connection ''{1}'' -extracts.status.encryption_key=Encryption key ID: ''{0}''. -extracts.status.encryption_keys=Old encryption key ID: ''{0}'', new encryption key ID: ''{1}''. -extracts.errors.upload=Error uploading extracts for {0}: ''{1}'' -extracts.errors.refresh=An error occurred when refreshing extracts for the {0} ''{1}'' -extracts.errors.create=Error creating extracts for the {0} ''{1}''. -extracts.errors.increment=Error incrementing extracts for {0}: ''{1}'' -extracts.errors.append=Error appending extracts for {0}: ''{1}'' -extracts.errors.replace=Error replacing extracts for {0}: ''{1}'' -extracts.errors.download=Error downloading extracts for {0}: ''{1}'' -extracts.errors.remove=Error removing extracts for {0}: ''{1}''. 
-extracts.errors.title=Process extracts error extracts.errors.nothing_to_upload=No extracts to upload for {0}: ''{1}'' extracts.errors.nothing_to_refresh=No extracts to refresh for {0}: ''{1}'' extracts.errors.nothing_to_increment=No extracts to increment for {0}: ''{1}'' extracts.errors.nothing_to_append=No extracts to append for {0}: ''{1}'' extracts.errors.nothing_to_replace=No extracts to replace for {0}: ''{1}'' -extracts.errors.refresh_summary=Refresh Extracts Error -extracts.errors.encrypt_summary=Encrypt Extracts Error -extracts.errors.rekey_summary=Reencrypt Extracts Error -extracts.errors.decrypt_summary=Decrypt Extracts Error -extracts.errors.remove_summary=Remove Extracts Error -extracts.errors.create_summary=Create Extracts Error -extracts.errors.datasource_overwritten=Refresh failed because the datasource was overwritten by another task during processing. Submit refresh again to correct. -extracts.errors.workbook_overwritten=Refresh failed because the workbook was overwritten by another task during processing. Submit refresh again to correct. -extracts.errors.encrypt=An error occurred when encrypting extracts for the {0} ''{1}'' -extracts.errors.decrypt=An error occurred when decrypting extracts for the {0} ''{1}'' -extracts.errors.rekey=An error occurred when reencrypting extracts for the {0} ''{1}'' -extracts.errors.encrypt_datasource_overwritten=Extract encryption failed because the datasource was overwritten by another task during processing. -extracts.errors.encrypt_workbook_overwritten=Extract encryption failed because the workbook was overwritten by another task during processing. -extracts.errors.rekey_datasource_overwritten=Extract reencryption failed because the datasource was overwritten by another task during processing. -extracts.errors.rekey_workbook_overwritten=Extract reencryption failed because the workbook was overwritten by another task during processing. 
-extracts.errors.decrypt_datasource_overwritten=Extract decryption failed because the datasource was overwritten by another task during processing. -extracts.errors.decrypt_workbook_overwritten=Extract decryption failed because the workbook was overwritten by another task during processing. -extracts.errors.remove_datasource_overwritten=Data source couldn''t be changed to live because it was overwritten by another task. -extracts.errors.remove_workbook_overwritten=Workbook couldn''t be changed to live because it was overwritten by another task. -extracts.errors.create_datasource_overwritten=Data source couldn''t be extracted because it was overwritten by another task. -extracts.errors.create_workbook_overwritten=Workbook couldn''t be extracted because it was overwritten by another task. -extracts.site_mode_change_details=Changed site extract encryption mode from ''{0}'' to ''{1}''. -extracts.errors.encryption_not_licensed=This operation has failed because extract encryption is not licensed. To perform this operation, you must renew your license or decrypt your extract. -extracts.errors.operation_cancelled_detail=Remove extract operation for {0} (data source: {2}) timed out. - -workbooks.messages.load_error=Unable to load Workbook {0} -workbooks.messages.locked=The workbook ''{0}'' is currently being modified by another user. Please try again later. 
-workbooks.new_workbook.default_name=New Workbook -workbooks.new_datasource.default_name=New Data Source -datasources.messages.load_error=Unable to load Data Source {0} - -workbooks.new_workbook.default_sheet_name=Sheet {0} -workbooks.edit_datasource.default_sheet_name=Scratchpad - -metrics.new_metric.default_name=New Metric messages.timeout_error.summary=Timeout Error messages.timeout_error.task_canceled=Canceling task that took longer than {0} seconds -subscriptions.manage_my_subscriptions=Manage my subscriptions -subscriptions.message_greeting=Hi, -subscriptions.subscribed_footer=Here''s your subscription to the {0} -subscriptions.subscribed_workbook=workbook -subscriptions.subscribed_view=view -subscriptions.fullstop=. -subscriptions.extract_refresh_footer=You receive this subscription email when data is refreshed. -subscriptions.explore_the_viz=Explore the viz -subscriptions.explore_the_workbook=Explore the workbook -subscriptions.pdf_attached=Your Tableau PDF is attached. -subscriptions.access_live_content=You can access the live view of the content here -subscriptions.warning_deprecated_type=deprecated -subscriptions.warning_error_content_type=Unknown content type -subscriptions.warning_error_type=unknown warning type -subscriptions.warning_extract_refresh_type=extract refresh failed -subscriptions.warning_flow_run_type=flow run failed -subscriptions.warning_introduction_general=The following data quality warnings affect this {0}. 
-subscriptions.warning_introduction_view=For more information, see the Data Details pane in the view -subscriptions.warning_introduction_workbook=For more information, see the workbook page -subscriptions.warning_localized_column=Column -subscriptions.warning_localized_datasource=Data source -subscriptions.warning_localized_database=Database -subscriptions.warning_localized_flow=Flow -subscriptions.warning_localized_table=Table -subscriptions.warning_localized_vconn=Connection -subscriptions.warning_lowercase_column=column -subscriptions.warning_lowercase_column_plural=columns -subscriptions.warning_maintenance_type=under maintenance -subscriptions.warning_permission_error=This warning is on an asset you do not have permissions to see. -subscriptions.warning_redacted=Permissions required -subscriptions.warning_sensitve_type=sensitive data -subscriptions.warning_sensitive_intro_count=Sensitive({0}) -subscriptions.warning_sensitive_intro_view=This view uses sensitive data. -subscriptions.warning_sensitive_intro_workbook=This workbook uses sensitive data. -subscriptions.warning_severe=Important data quality warning -subscriptions.warning_stale_type=stale data -subscriptions.warning_standard=Data quality warning -subscriptions.warning_standard_plural=Data quality warnings -subscriptions.warning_warning_type=warning -errors.subscriptions.image_render_failed=The snapshot of the view ''{0}'' could not be properly rendered. -errors.subscriptions.see_live_view=To see the view online, go to {0}. -errors.subscriptions.disabled_on_server=Server-wide subscriptions setting is not enabled. Cannot process subscriptions. -errors.subscriptions.disabled_on_site=Subscriptions are disabled for site. -errors.subscriptions.site_is_locked=Site is locked. Cannot process Subscriptions. -errors.subscriptions.orphan=Workbook and views associated with this subscription could not be found. -errors.subscriptions.user_unlicensed=Skipping unlicensed user''s subscription. 
-errors.subscriptions.smtp_unreacheable=Unable to reach SMTP host. Check if SMTP has been configured. -errors.subscriptions.uncategorized_error=Unknown error -errors.subscriptions.pdf_render_failed=The PDF attachment for this email could not be properly rendered. -errors.subscriptions.content_limit_exceeded=The generated content is larger than the {0}MB size limit and could not be attached to this email. -errors.subscriptions.attachments_not_enabled=This subscription failed because email attachments were disabled. Contact your Tableau administrator for details. -errors.subscriptions.pdf_email_failed=The generated PDF could not be attached to this email. -errors.subscriptions.learn_more_troubleshoot=Learn More -errors.subscriptions.bad_extract_refresh_subscription_configuration=Subscription suspended because the workbook contains multiple extract refresh schedules. To resume, update frequency to On Selected Schedule. -errors.subscriptions.subscription_failed_refresh_failure=The extract refresh on the data source failed. -errors.subscriptions.multiple_extracts_for_extract_refresh_subscription=The workbook contains multiple extract refresh schedules. Update frequency to On Selected Schedule. -errors.subscriptions.datasource_is_missing_password=Data source connection error. Check the credentials on all data source connections for the workbook. - -subscriptions.results_summary.sg={0} of {1} subscription sent from schedule ''{2}''. -subscriptions.results_summary.pl={0} of {1} subscriptions sent from schedule ''{2}''. -subscriptions.results_views_per_subscription.sg={0} ({1} of 1 view succeeded) -subscriptions.results_views_per_subscription.pl={0} ({1} of {2} views succeeded) -subscriptions.results_views_per_subscription.more=({0} more) -subscriptions.results_views_per_subscription.failed={0} ({1} of {2} views failed). 
- -subscriptions.results_header.all_shown.failures.sg=1 failure -subscriptions.results_header.all_shown.failures.pl={0} failures -subscriptions.results_header.all_shown.warnings.sg=1 warning -subscriptions.results_header.all_shown.warnings.pl={0} warnings - -subscriptions.results_header.some_hidden.failures.pl.sg={0} failures (1 hidden, see logs) -subscriptions.results_header.some_hidden.failures.pl.pl={0} failures ({1} hidden, see logs) -subscriptions.results_header.some_hidden.warnings.pl.sg={0} warnings (1 hidden, see logs) -subscriptions.results_header.some_hidden.warnings.pl.pl={0} warnings ({1} hidden, see logs) - -dataalerts.manage_my_data_alerts=Manage alerts -dataalerts.edit_data_alert=Edit alert -dataalerts.remove_user_from_data_alert=Remove me -dataalerts.add_user_to_data_alert=Add me to this alert -dataalerts.greater_or_equal={0} was above or equal to {1}. -dataalerts.greater={0} was above {1}. -dataalerts.less_or_equal={0} was below or equal to {1}. -dataalerts.less={0} was below {1}. -dataalerts.equal={0} was equal to {1}. -dataalerts.email_alt_text=View used by data-driven alert: {0} -dataalerts.email_alt_text_hidden_view=View used by data-driven alert: {0} with changes applied -dataalerts.alert_triggered_for_the_view=Alert triggered for the view -dataalerts.failure.email.subject=Data-driven alert isn''t working for: {0} -dataalerts.failure.status=Your Alert {0} is Not Working -dataalerts.failure.view.details.alert=View this alert -dataalerts.failure.explanation=Your data-driven alert, {0}, isn''t working right now, so recipients won''t be notified when the data condition is true. You''ll receive an email notification when the alert is working again. -dataalerts.failure.affected.alert=Affected data-driven alert -dataalerts.failure.time=Time alert failed -dataalerts.failure.cause=Sometimes alerts fail due to temporary network issues, which resolve themselves. 
You can also try recreating the alert on the original data view, or troubleshoot common causes of alert failure like these: -dataalerts.failure.changed.view=A removed or renamed workbook, view, or data field. -dataalerts.failure.expired.credentials=Expired database credentials embedded in a workbook. -dataalerts.failure.datasource.error=An inaccessible data source. -dataalerts.failure.email.footer=You are receiving this email at {0} because of an alert failure. -dataalerts.failure.troubleshooting=To check if any of the issues listed above are causing your alert to fail, click the button below. -dataalerts.success.email.subject=Data-driven alert has resumed for: {0} -dataalerts.success.status=Your Alert {0} has resumed -dataalerts.success.explanation=Your data-driven alert, {0}, is working again, so recipients will be notified when the data condition is true. -dataalerts.failure.error.internalError=Data Alert internal error -dataalerts.failure.error.invalidDataAlertSpecification=Invalid Data Alert specification -dataalerts.failure.error.summaryTableNotFound=Summary data table not found -dataalerts.failure.error.unclassifiedError=Unclassified error -dataalerts.failure.error.viewContentNotFound=Data Alert view''s {0} content not found -dataalerts.failure.error.viewNotFound=Data alert view cannot be located -dataalerts.failure.error.workbookNotFound=Workbook not found -dataalerts.failure.error.paneDescriptorAxisNotFound=No axis found for field {0} -dataalerts.failure.error.paneDescriptorNotFound={0} -dataalerts.failure.error.unsupportedComparisonExpression=Unsupported comparison expression {0} -dataalerts.failure.error.nonConstComparisonOp=Non constant comparison operator detected: {0} -dataalerts.failure.error.cannotAddFilter=Adding Filter to data alert failed -dataalerts.failure.error.sheetNotAlertable=Sheet {0} does have supported data for alerts. {1} -dataalerts.failure.error.dataAlertMeasureNotFound=Cannot select measure. 
{0} -dataalerts.failure.error.tooManyMeasuresForDataAlert=Incorrect number of measures detected.{0} -dataalerts.failure.error.thresholdParseError=Failed to parse threshold text {0}. {1} -dataalerts.failure.error.dataAlertPresModelCreationError=Error while serializing alert. {0} -dataalerts.failure.error.cannotDrawDataAlertDialog=Failed to draw data alert dialog. -dataalerts.failure.error.unsupportedDataAlertType=Data alert type {0} is currently unsupported. -dataalerts.failure.suspend.email.subject=Data-driven alert is suspended for: {0} -dataalerts.failure.suspend.email.status=Your alert {0} has been suspended -dataalerts.failure.suspend.email.explanation=While suspended, we will not evaluate the view or alert recipients. After a fix has been made, you can resume the alert in {1}. -dataalerts.failure.suspend.view.name.prefix=View -dataalerts.failure.suspend.time=Time of suspension -dataalerts.failure.suspend.email.footer=You are receiving this email because your alert was suspended. -dataalerts.failure.suspend.email.view.name.my.content=My Content -dataalerts.failure.suspend.view.alerts=View Alerts - -metricsservices.failure.affected.metric=Affected metric -metricsservices.failure.cause=Sometimes metric refreshes fail due to temporary connectivity issues, which resolve themselves. Other common reasons why a metric refresh might fail include: -metricsservices.failure.changed.view=A removed or renamed workbook, view, or data field -metricsservices.failure.permissions.error=A change in permissions for the connected view -metricsservices.failure.email.footer=You are receiving this email at {0} because of a failure with your metric refresh. -metricsservices.failure.email.subject=Metric refresh isn''t working for: {0} -metricsservices.failure.expired.credentials=Expired database credentials embedded in a workbook -metricsservices.failure.explanation=Your metric isn''t able to refresh, so the metric data isn''t updating. 
You''ll receive an email notification when the metric refresh is working again. -metricsservices.failure.status=Your metric "{0}" isn''t refreshing -metricsservices.failure.suspend.email.explanation=While the metric refresh is suspended, your metric data will not update. After a fix has been made, you can resume the metric refresh. -metricsservices.failure.suspend.email.footer=You are receiving this email at {0} because your metric refresh was suspended. -metricsservices.failure.suspend.email.status=Refresh for your metric "{0}" has been suspended -metricsservices.failure.suspend.email.subject=Metric refresh suspended for: {0} -metricsservices.failure.suspend.time=Time of suspension -metricsservices.failure.suspend.view.metric=Go to Metric -metricsservices.failure.suspend.view.name.prefix=Connected view -metricsservices.failure.time=Time of refresh failure -metricsservices.failure.troubleshooting=To check if any of the issues listed above are causing your metric refresh to fail, click the button below. You can also try recreating the metric on the connected view. -metricsservices.failure.view.details.metric=Go to Metric -metricsservices.failure.view.name.prefix=Connected view -metricsservices.success.email.subject=Metric refresh has resumed for: {0} -metricsservices.success.explanation=The refresh for your metric is working again, so it will check for updates to your metric data. -metricsservices.success.status=Refresh for your metric "{0}" has resumed - -errors.staticimages.generic=Error generating static images for workbook {0}. {1}. errors.internal_error.request.message={0} was unable to satisfy the request. -errors.permissions.limited_user_visibility=Error completing query. 
User must be an admin or creator to query other users if User Visibility is set to limited - -local_names.system_user.guest.name=Guest -local_names.group.all_users.name=All Users -local_names.containers.personal_space.name=Personal Space - -password_reset.email.subject=Here''s the link to reset your password -password_reset.email.body=Hi {0},

You just requested a password reset.

To change your Tableau Server password, {1} or paste the following link into your browser: {2}

This link will expire in 48 hours, so take care of it right away.

If you didn''t request a password reset, you may want to let your server admin know.

Thanks for using Tableau!
The Tableau Team -password_reset.email.link=click here hours.pl={0} hours hours.sg=1 hour @@ -446,30 +61,6 @@ minutes.sg=1 minute seconds.pl={0} seconds seconds.sg=1 second -dataconnections.classes.tableau_server_site=Tableau Server Site - -materializeviews.errors.workbook_not_in_project=Project ''{0}'' does not contain workbook ''{1}'' -materializeviews.errors.workbook_has_no_extract=Failed to update materialized views setting for workbook ''{0}'', currently only workbooks with embedded extracts are supported -materializeviews.errors.workbook_updating_errors=Errors found while updating materialized views setting for workbooks -materializeviews.errors.workbook_too_large=Failed to update materialized views setting for workbook ''{0}'' because the size of workbook exceeds the limit of {1} GB -materializeviews.errors.project_path_not_exists=Project path ''{0}'' does not exist -materializeviews.errors.feature_disabled=Failed to update materialized views setting for workbook ''{0}'' because feature is currently disabled -materializeviews.errors.update_materialized_views_setting_failed=Failed to update materialized views setting for workbook ''{0}'' -materializeviews.errors.site_not_enabled=This site is not enabled for materialized views. Contact your administrator -materializeviews.errors.encrypted_extracts_not_supported=Materialized views feature currently does not support workbooks with encrypted extracts -materializedviews.finished.create=Created materialized views with status ''{0}'' for workbook ''{1}'' -materializedviews.finished.delete=Deleted materialized views for workbook id: {0} -materializedviews.finished.encrypt=Finished encryption of materialized views for workbook ''{0}'' -materializedviews.finished.decrypt=Finished decryption of materialized views for workbook ''{0}'' -materializedviews.finished.rekey=Finished reencryption of materialized views for workbook ''{0}'' -materializedviews.status.encryption_key=Encryption key ID: ''{0}''. 
-materializedviews.status.encryption_keys=Old encryption key ID: ''{0}'', new encryption key ID: ''{1}''. -materializedviews.status.encryption_type=Encryption datasource type: ''{0}'' - -customizedviews.namedSharingPrefix=Shared version - -flows.drafts.default_name=New Flow - content_type.project=Project content_type.workbook=Workbook content_type.view=View @@ -483,25 +74,9 @@ content_type.explaindata_explanation=Explanation content_type.virtualconnection=Virtual Connection content_type.virtualconnectiontable=Virtual Connection Table -upgradethumbnails.status.server_started=Server Response: Started thumbnails upgrade! -upgradethumbnails.status.server_stopped=Server Response: Stopped %d jobs - -dataquality.author.extract_refresh_monitoring=extract refresh monitoring -dataquality.author.flow_run_monitoring=flow run monitoring - -dataquality.message.refresh_failed=Refresh failed: {0} -dataquality.message.last_successful=Last successful: {0} -dataquality.message.next_scheduled=Next scheduled: {0} -dataquality.message.flow_run_output_steps_failed={0}/{1} output steps failed to run - -label_value.description.certified=This asset is trusted and recommended. -label_value.description.deprecated=This asset is no longer maintained and shouldn''t be used. -label_value.description.extract_refresh_failure=This asset''s most recent extract refresh failed. -label_value.description.flow_run_failure=This flow''s most recent run failed. -label_value.description.maintenance=This asset is undergoing maintenance. -label_value.description.sensitive_data=This asset contains sensitive information. -label_value.description.stale=This asset is outdated. -label_value.description.warning=This asset has a general quality issue. 
+dataalerts.failure.error.workbookNotFound=Workbook not found +dataconnections.classes.tableau_server_site=Tableau Server Site +materializeviews.errors.project_path_not_exists=Project path ''{0}'' does not exist importcsvsummary.error.details=Error details: importcsvsummary.remainingerrors=(remaining errors not shown) @@ -514,24 +89,7 @@ importcsvsummary.error.unexpected_extension=Unexpected filename extension for CS importcsvsummary.error.size_limit=CsvFile exceeds size limit of {0} importcsvsummary.error.local.password_mismatch=Update of local site user requires empty or matching password importcsvsummary.error.local.password_required=Creation of local system user when Auth is Local requires password -importcsvsummary.error.ad.user_not_found=AD user not found ''{0}'' -importcsvsummary.error.domain_not_found=Invalid domain ''{0}'' -importcsvsummary.error.cannot_create_user_identity=Cannot create user identity for user ''{0}'' -importcsvsummary.error.invalid_identity_pool_name=Invalid Identity Pool name ''{0}'' importcsvsummary.error.too_many_errors=Too many errors encountered in CSV file -audit.errors.wdc_not_allowlisted=Web data connector is not allowlisted. For help, please refer to: https://help.tableau.com/current/online/en-us/to_keep_data_fresh.htm - user.input.name.err.too_long=name is longer than the maximum number of characters {0} user.input.name.err.empty=name cannot be empty - -askdata.title=Ask Data - -slack.app.upgrade.email.notification.subject=Tableau App for Slack Update -slack.app.upgrade.email.notification.introduction=An update is available for the Tableau app for Slack. Tableau recommends updating the app to maintain app performance and to use new Tableau Cloud features. Look for new features and changes in
What''s New in Tableau Cloud. -slack.app.upgrade.email.notification.action=To update the site''s connection to the Tableau for Slack app, see Update your Tableau for Slack app. If you have multiple Tableau Cloud sites, update the app in each site. -slack.app.upgrade.email.notification.steps=This service notification has been published by Tableau Cloud. -slack.app.upgrade.email.notification.reason=You are receiving this notification because you are an administrator of a Tableau Cloud site that is connected to Slack. -slack.app.upgrade.email.notification.footer.privacy=Read our Privacy Policy - -unifiedconsumption.tcrm.folders.private=My Private App diff --git a/tabcmd/locales/en/tabcmd_messages_en.properties b/tabcmd/locales/en/tabcmd_messages_en.properties index 8213a313..c91d8f49 100644 --- a/tabcmd/locales/en/tabcmd_messages_en.properties +++ b/tabcmd/locales/en/tabcmd_messages_en.properties @@ -1,3 +1,4 @@ +# These strings come from workgroup/src/silos/tableau-server/applications/app-tabcmd/res/../tabcmd_messages commandlineutils.errors.bad_value=Option ''{0}'' has a bad value of ''{1}''. Must be one of: {2} commandlineutils.errors.not_integer=Value ''{0}'' for option ''{1}'' is not an integer @@ -11,13 +12,14 @@ common.errors.requires_group_name=The ''{0}'' command requires a group name common.errors.requires_site_name=The ''{0}'' command requires a site name common.errors.options_conflict=New and deprecated options to specify user role were specified. Only one can be used common.errors.csv_sizelimit=The CSV file you are referencing exceeds the size limit of 100MB. Divide the contents of the file into smaller CSV files and then import them separately. -common.options.admin-type=[Deprecated] Assigns [or removes] the site admin right for all users in the CSV file. This setting may be overridden by the values on individual rows in the CSV file. {0} may be: {1}, {2}, or {3}. 
If not specified: {3} for new users, unchanged for existing users + +common.options.admin-type=[Deprecated - has no effect] Assigns [or removes] the site admin right for all users in the CSV file. This setting may be overridden by the values on individual rows in the CSV file. {0} may be: {1}, {2}, or {3}. If not specified: {3} for new users, unchanged for existing users common.options.complete=Require [or not] that all rows be valid for any change to succeed. Default: {0} -common.options.license=[Deprecated] Sets the default license level for all users. This may be overridden by the value in the CSV file. {0} can be {1}, {2}, or {3} +common.options.license=[Deprecated - has no effect] Sets the default license level for all users. This may be overridden by the value in the CSV file. {0} can be {1}, {2}, or {3} common.options.nowait=Do not wait for the job to complete -common.options.publisher=[Deprecated] Assigns [or removes] the publish right for all users in the CSV file. This setting may be overridden by the values on individual rows in the CSV file. Default if not specified: false for new users, unchanged for existing users +common.options.publisher=[Deprecated - has no effect] Assigns [or removes] the publish right for all users in the CSV file. This setting may be overridden by the values on individual rows in the CSV file. Default if not specified: false for new users, unchanged for existing users common.options.silent-progress=Do not display progress messages for the job -common.options.sysadmin-type=[Deprecated] Assigns [or removes] the site admin right for all users in the CSV file. This setting may be overridden by the values on individual rows in the CSV file. {0} can be: {1}, {2}, {3}, or {4}. Default if not specified: {4} for new users, unchanged for existing users +common.options.sysadmin-type=[Deprecated - has no effect] Assigns [or removes] the site admin right for all users in the CSV file. 
This setting may be overridden by the values on individual rows in the CSV file. {0} can be: {1}, {2}, {3}, or {4}. Default if not specified: {4} for new users, unchanged for existing users common.output.job_queued_success=Job queued common.output.succeeded=Succeeded common.options.role=Sets the default role for all affected users. Legal values for {0}: {1}. If unspecified, server uses default value: {2} @@ -37,7 +39,7 @@ createextracts.for.workbook_name=Creating extract for workbook with name ''{0}'' createextracts.for.workbook_url=Creating extract for workbook with URL ''{0}'' createextracts.options.datasource=The name of the target data source for extract creation createextracts.options.embedded-datasources=A space-separated list of embedded data source names within the target workbook. Enclose data source names with double quotes if they contain spaces. Only available when creating extracts for a workbook. -createextracts.options.encrypt=Create encrypted extract. +createextracts.options.encrypt=Create encrypted extract. (N/A on Tableau Cloud: extract encryption is controlled by the site admin.) createextracts.options.include-all=Include all embedded data sources within target workbook. Only available when creating extracts for workbook. createextracts.options.parent-project-path=Path of the project that is the parent of the project that contains the target resource. Must specify the project name with --project. createextracts.options.project=The name of the project that contains the target resource. Only necessary if {0} or {1} is specified. If unspecified, the default project ''{2}'' is used. @@ -53,26 +55,9 @@ createproject.options.parent-project-path=Path of the project that will contain createproject.short_description=Create a project createproject.status=Creating project ''{0}'' on the server... -createsite.errors.invalid_url_option=The URL option is invalid when creating multiple sites. 
Re-run the command after removing the URL option or create each site separately createsite.errors.site_name_already_exists=A site with the name ''{0}'' already exists. Try a different site name. -createsite.errors.site_id_already_exists=A site with the ID ''{0}'' already exists. Use -r to specify a different site ID. See tabcmd createsite help for more information. -createsite.options.allow-mobile-snapshots=Allow [or-deny] mobile snapshots. Default is to allow mobile snapshots -createsite.options.allow-subscriptions=Allow [or deny] subscriptions for this site. Default is the server default setting. Subscriptions cannot be enabled if server subscriptions are disabled -createsite.options.allow-web-authoring=Allow [or deny] web authoring for this site. Default is to allow web authoring -createsite.options.metrics-level=0 for no collection, 100 for all collections -createsite.options.guest-access-enabled=Guest access permission to see views for those that are not signed into a Tableau Server account -createsite.options.site-mode=Allow [or deny] site administrator from user management on site -createsite.options.storage-quota=Site storage quota in MB -createsite.options.subscription-email=Email used for subscriptions -createsite.options.subscription-footer=Footer used for subscriptions -createsite.options.url=Site ID of the site -createsite.options.user-quota=Maximum site users -createsite.options.extract_encryption_mode=Extract encryption mode: disabled, enabled, or enforced. If not set, default value is used. -createsite.options.web_extraction_enabled=Allow [or deny] extract creation and changing extracts to live for this site. Default is allowed. -createsite.options.run_now_enabled=Allow [or deny] Run Now option for this site. Default is set to allow Run Now. -createsite.options.time_zone=Time zone to be used with extracts on this site. Default is unset to use the server''s time zone. 
createsite.short_description=Create a site -createsite.status=Create site ''{0}'' on the server... +createsite.status=Creating site ''{0}'' on the server... createsiteusers.options.auth-type=Assigns the authentication type for all users in the CSV file. For Tableau Cloud, {0} may be {1} (default) or {2}. For Tableau Server, {0} may be Local (default) or {2}. createsiteusers.description=Create users on the current site. The users are read from the given CSV file. The file can have the columns in the order shown below.{0}1. Username{0}2. Password (Ignored if using Active Directory){0}3. Friendly Name (Ignored if using Active Directory){0}4. License Level ({1}, {2}, {3}, {4} or {5}){0}5. Administrator ({6}/{7}){0}6. Publisher ({8}/{9}/1 or {10}/{11}/0){0}7. Email (only for Tableau Public){0}The file can have fewer columns. For example, it can be a simple list with one user name per line. You can use quotation marks if a value contains commas. Tabcmd waits for the {12} task to complete. You may choose not to wait for the task to complete on the server and instead return immediately by passing the {13} flag. System administrators cannot be created or demoted using this command. Use ''{14}'' instead @@ -131,15 +116,6 @@ deleteusers.description=Delete users. The users are read from the given comma se deleteusers.short_description=Delete users deleteusers.status=Deleting users listed in {0} from the server... -editdomain.description=Edit a domain. Use the ''{0}'' command to view a list of domains for editing -editdomain.errors.requires_nickname_name=Either a ''{0}'' option or a ''{1}'' option must be specified -editdomain.options.id=ID of the domain -editdomain.options.name=Name of the domain -editdomain.options.nickname=Nickname of the domain -editdomain.short_description=Edit a domain -editdomain.output={0}ID{1}Nickname{1}{1}Name{0} -editdomain.status=Edit domain with ID ''{0}'' on the server... 
- editsite.errors.invalid_site-id=The {0} option is invalid when editing multiple sites. Re-run the command after removing the {0} option or edit each site separately editsite.errors.invalid_site-name=The {0} option is invalid when editing multiple sites. Re-run the command after removing the {0} option or edit each site separately editsite.options.allow-mobile-snapshots=Allow [or-deny] mobile snapshots @@ -148,16 +124,20 @@ editsite.options.allow-web-authoring=Allow [or-deny] web authoring for this site editsite.options.cache-warmup=Allow [or deny] cache warmup for this site editsite.options.cache-warmup-threshold=Threshold in days for how recently a view must have been viewed to trigger warmup editsite.options.site-name=Display name of the site -editsite.options.status=Change availability of site. Must be either ''{0}'' or ''{1}'' +editsite.options.status=Set to ACTIVE to activate a site, or to SUSPENDED to suspend a site. editsite.options.allow-materialized-views=Allow [or deny] materialized views for this site -editsite.options.extract_encryption_mode=Extract encryption mode: disabled, enabled, or enforced. If not set, then it is not changed on the server. +editsite.options.extract_encryption_mode=Extract encryption mode: disabled, enabled, or enforced. If not set, then it is not changed on the server. (N/A on Tableau Cloud.) editsite.options.web_extraction_enabled=Allow [or deny] extract creation and changing extracts to live for this site. The default setting is allowed. -editsite.options.run_now_enabled=Allow [or deny] Run Now option for this site. Default is set to allow Run Now +editsite.options.run_now_enabled=Allow [or deny] Run Now option for this site. Default is set to allow Run Now. (N/A on Tableau Cloud.) 
editsite.options.time_zone=Time zone to be used with extracts on this site editsite.options.use_default_time_zone=Unset time zone to be used with extracts on this site, it will use default server time zone instead editsite.short_description=Edit a site editsite.status=Edit site ''{0}'' on the server... +createsite.options.storage-quota=In MB, the amount of data that can be stored on the site. +createsite.options.site-mode=Allow [or deny] site administrators the ability to add users to or remove users from the site. +createsite.options.user-quota=The maximum number of users allowed on the site. + encryptextracts.short_description=Encrypt extracts on a site encryptextracts.status=Scheduling extracts on site {0} to be encrypted... @@ -203,26 +183,17 @@ help.short_description=Help for tabcmd commands httputils.found_attachment=Found attachment: {0} httputils.mapped_attachment=Found attachment: {0} (re-mapped to {1}) -initialuser.errors.username_and_password_required=Both username and password are required -initialuser.options.friendly-name=Friendly name -initialuser.short_description=Create an initial user on an uninitialized server - -listdomains.description=List domains -listdomains.status=Listing domains from the server... - listsites.output={0}{0}NAME: {1} {0}SITEID: ''{2}'' listsites.output_extended={0}{0}NAME: {1} {0}SITEID: ''{2}''{0}EXTRACTENCRYPTION: {3} listsites.short_description=List sites for user listsites.status=Listing sites for user {0}... -listsites.options.get_extract_encryption_mode=Print extract encryption mode of the site(s) +listsites.options.get_extract_encryption_mode=Include the extract encryption mode for each site. login.description=Sign in to the server. 
Your session will continue until it expires on the server or you use {0} login.short_description=Sign in to the server logout.short_description=Sign out from the server - publish.description=Publish a workbook, data source, or extract to the server - publish.errors.invalid_datasource_update_file_type=Incorrect file type ''{0}'' for updating a data source. Replacing/appending only applies to an extract (.tde) file publish.errors.invalid_name_option={0} option is invalid when publishing multiple items publish.errors.invalid_publish_file_type=Incorrect file type ''{0}'' for publishing, expected a .tde, .tds, .tdsx, .twb, or .twbx file @@ -235,19 +206,19 @@ publish.options.async=Publish asynchronously publish.options.db-password=Database password for all data sources publish.options.db-username=Database username for all data sources publish.options.description=Description of the workbook or data source -publish.options.encrypt_extracts=Encrypt extracts on the server +publish.options.encrypt_extracts=Encrypt extracts in the workbook or datasource being published to the server. (N/A on Tableau Cloud.) publish.options.name=Workbook/data source name on the server. If omitted, the workbook/data source will be named after the file name, without the twb(x), tds(x), or tde extension. Publishing a .tde file will create a data source publish.options.oauth-username=Use the credentials saved on the server keychain associated with USERNAME to publish -publish.options.overwrite=Overwrite the existing workbook/data source, if any +publish.options.overwrite=Overwrite the data and metadata of the existing workbook/data source/data extract, if any exists. 
publish.options.project=Project to publish the workbook/data source to publish.options.parent-project-path=Path of the parent of the project to publish the workbook/data source to -publish.options.replace=Replace extract file to existing data source +publish.options.replace=Use the extract file being published to replace data (but not metadata) in the existing data source. publish.options.restart=Restarts the file upload publish.options.save-db-password=Store the database password on server publish.options.save-oauth=Embed the OAuth credentials specified with {0} publish.options.tabbed=Publish with tabbed views enabled -publish.options.thumbnail-groupname=If the workbook contains any user filters, impersonate this group while computing thumbnails -publish.options.thumbnail-username=If the workbook contains any user filters, impersonate this user while computing thumbnails +publish.options.thumbnail-groupname=If the workbook contains user filters, the thumbnails will be generated based on what the specified group can see. Cannot be specified when --thumbnail-username option is set. +publish.options.thumbnail-username=If the workbook contains user filters, the thumbnails will be generated based on what the specified user can see. Cannot be specified when --thumbnail-groupname option is set. publish.status=Publishing ''{0}'' to the server. This could take several minutes... 
publish.success=File successfully published to the server at the following location: @@ -264,14 +235,14 @@ refreshextracts.errors.add_and_remove_calculations=Cannot run refresh with both refreshextracts.errors.calculations_and_incremental=Cannot add or remove materialized calculations during incremental extract refresh refreshextracts.errors.error=Error refreshing extract refreshextracts.options.datasource=The name of the data source to refresh -refreshextracts.options.incremental=Perform an incremental refresh (if supported) +refreshextracts.options.incremental=Perform an incremental refresh (if supported by the data source) refreshextracts.options.parent-project-path=Path of the parent of the project that contains the extract to refresh. Must specify the project name with --project. refreshextracts.options.project=The name of the project that contains the workbook/data source. Only necessary if {0} or {1} is specified. If unspecified, the default project ''{2}'' is used -refreshextracts.options.synchronous=Wait for the refresh to run and finish before exiting +refreshextracts.options.synchronous=Wait for the refresh to run and finish before exiting. Adds the refresh operation to the task queue to be run as soon as possible. refreshextracts.options.url=The canonical name for the workbook or view as it appears in the URL refreshextracts.options.workbook=The name of the workbook to refresh -refreshextracts.options.addcalculations=Adds materialized calculations into the extract -refreshextracts.options.removecalculations=Removes materialized calculations from the extract +refreshextracts.options.addcalculations=[Deprecated - has no effect.] Adds materialized calculations into the extract +refreshextracts.options.removecalculations=[Deprecated - has no effect.] 
Removes materialized calculations from the extract refreshextracts.short_description=Refresh the extracts of a workbook or data source on the server refreshextracts.status_incremented=Scheduling extracts for {0} ''{1}'' to be incremented now... refreshextracts.status_refreshed=Scheduling extracts for {0} ''{1}'' to be refreshed now... @@ -284,25 +255,6 @@ runschedule.errors.requires_schedule_name=The ''{0}'' command requires a schedul runschedule.short_description=Run a schedule runschedule.status=Running schedule ''{0}'' on the server... -materializeviews.short_description=Update and check on materialized views settings for sites/workbooks -materializeviews.status.site=Updating materialized views setting for site ''{0}''... -materializeviews.status.workbook=Updating materialized views setting for workbook ''{0}''... -materializeviews.errors.invalid_workbook_arguments=The materializeviews command requires one argument of a workbook name -materializeviews.errors.invalid_site_arguments=The materializeviews command requires one argument of a site ID -materializeviews.errors.invalid_workbook_option=Use ''{0}'' or ''{1}'' with ''{2}'' -materializeviews.errors.invalid_project_path=Use ''{1}'' with ''{0}'' -materializeviews.errors.invalid_site_option=Use ''{0}'' with ''{1}'' -materializeviews.errors.site_mode_conflict=Use ''{0}'' or ''{1}'' to change site materialized views mode -materializeviews.errors.site_not_updated=Site ''{0}'' not updated, only server administrators can change materialized views setting -materializeviews.errors.invalid_argument_options_combination=Use ''{0}'' or ''{1}'' to change materialized views setting -materializeviews.options.materialize_now=Use ''{0}'' to schedule materialization for given workbook immediately -materializeviews.options.project=Use ''{0}'' to specify project name -materializeviews.options.parent_project_path=Use ''{0}'' to specify parent project path to ''{1}'' -materializeviews.options.allow_workbook=Use ''{0}'' to 
enable/disable materialized views for workbook -materializeviews.options.allow_site=Use ''{0}'' to enable/disable materialized views for site -materializeviews.options.selected_workbooks=Use ''{0}'' to enable selected workbooks in the site, used with the option ''{1}''. This is the default option -materializeviews.options.all_workbooks=Use ''{0}'' to enable all workbooks in the site, used with the option ''{1}'' - session.auto_site_login=Auto-sign in to site: {0} session.certificate-alias=Certificate alias: {0} session.connecting=Connecting to the server... @@ -318,7 +270,7 @@ session.errors.no_sslcafile_found=Could not find a Certificate Authority (CA) fi session.errors.script_no_password=Tabcmd was run from a script or IDE but no password was provided. A password must be provided as an argument to tabcmd in order to log in. session.errors.session_expired=Your session has expired session.errors.unsupported_command=Command not supported by this server -session.keystore.notice=Do you want to store password in local to avoid typing password next time? [anwser yes or no] +session.keystore.notice=Do you want to store password locally to avoid typing password next time? [yes or no] session.keystore.password=Keystore Password: session.keystore.pwdfile_create=Create keystore password file in path: {0} session.keystore.pwdfile_remove=Remove the keystore password file which contains wrong password @@ -361,79 +313,67 @@ sessionoptions.errors.bad_protocol=Protocol ''{0}'' is not supported sessionoptions.errors.bad_proxy_format=Proxy format does not match HOST:PORT: {0} sessionoptions.errors.bad_timeout=Non-numeric argument to {0} ''{1}'' ignored -set.description=Set a setting on the server. 
Use !setting to turn a setting off -set.errors.requires_setting_name=The ''{0}'' command requires a setting name -set.errors.single_setting_name=The ''{0}'' command take a single setting name -set.short_description=Set a setting on the server - -setsetting.status=Setting ''{0}'' to {1} - -syncgroup.errors.server_admin=Minimum site role of a group cannot be set to ''ServerAdministrator''. -syncgroup.options.role=Sets the default role for all users in the group. Legal values for {0}: {1}. If a user already exists, the given role is only applied if it''s less restrictive than the user''s current role. If unspecified, default is {2} for new users and unchanged for existing users -syncgroup.options.overwritesiterole=Allow''s a user''s site role to be changed to a less privileged one when using --role, overriding the default behavior. -syncgroup.options.grantlicensemode=When to grant a user the license specified on the group. Legal values are: {0}. If no value is passed in ''on-sync'' will be used as the default value. -syncgroup.short_description=Synchronize the server with an Active Directory group -syncgroup.status=Synchronizing server with Active Directory group {0}... -syncgroup.options.sysadmin-type=[Deprecated. Use --role instead.] Assigns [or removes] the admin right for all users in the group. {0} can be: {1}, {2}, {3}, or {4}. -syncgroup.options.license=[Deprecated. Use --role instead.] Sets the default license level for all users in the group. {0} can be {1}, {2}, or {3}. -syncgroup.options.publisher=[Deprecated. Use --role instead.] Assigns [or removes] the publish right for all users in the group. - -tabcmd.mutually_exclusive_options=Cannot specify ''{0}'' and ''{1}'' in the same command - -tabcmdparser.errors.ambiguous_option=Ambiguous option ''{0}'' among options: {1} -tabcmdparser.warning.unexpected_dash=Unexpected dash detected in argument: {0}. Valid dashes are -, --. This error often occurs when copy/pasting from a text editor with formatting. 
Try re-typing the dash for the argument in the command line. +setsetting.status=Setting {0} to {1} +# tabcmd.x messages are new for tabcmd 2 -resetopenidsub.short_description=Reset an OpenID Connect SubId for a user -resetopenidsub.options.targetusername=User to be reset -resetopenidsub.options.all=Reset all users - -version.description=Print version information +tabcmd.content.description=A description for the item. +tabcmd.content.site_id=Used in the URL to uniquely identify the site. Different from the site name. +tabcmd.delete.target.name=The name of the workbook or data source on the server to be deleted. -vizqlsession.errors.cannot_create_session=Failed to create a VizQl session on the server -vizqlsession.errors.no_command_result=Could not find command result: {0} -vizqlsession.errors.session_already_closed=VizQl session object already closed -vizqlsession.errors.cannot_parse_response=Cannot parse JSON response string: {0} - -upgradethumbnails.short_description=This will run the Upgrade Thumbnails backgrounder job. This will rerender to high resolution any thumbnails generated prior to Tableau 2018.2. This can put higher than usual load on Tableau Server and external datasources. If any problems arise, the job can be stopped by running tabcmd upgradethumbnails --server --stop -upgradethumbnails.options.stop=Stop all upgrade thumbnails related jobs -upgradethumbnails.status.running=Starting thumbnails upgrade -upgradethumbnails.status.not_running=Not starting thumbnails upgrade -upgradethumbnails.status.cancelling=Cancelling thumbnails upgrade jobs -upgradethumbnails.continue_promt=Do you wish to continue? Enter 1 for Yes, 0 for No (1/0) -upgradethumbnails.invalid_response=Invalid response -validateidpmetadata.options.digest_algorithms=A space-separated list of digest algorithms. Legal values for {0}: {1}. 
If not specified, server uses values from server configuration setting: {2} -validateidpmetadata.options.min_allowed_rsa_key_size=If not specified, server uses values from server configuration setting: {0} -validateidpmetadata.options.min_allowed_elliptic_curve_size=If not specified, server uses values from server configuration setting: {0} -validateidpmetadata.options.site_names=A space-separated list of site names on which to perform certificate validation. If not specified, then all sites are inspected. -validateidpmetadata.short_description=Identifies Tableau Server sites that are configured with IdPs using the insecure digest algorithm, SHA-1. This command also identifies IdPs that are using certificates with an insufficient RSA key size or elliptic curve size. +tabcmd.content_type.extract=Extract +tabcmd.content_type.group=Group +tabcmd.deletesite.success=Successfully deleted site {} +tabcmd.deletesite.error=Error while deleting site +tabcmd.deletesite.status_message=Deleting site {0}, logged in to site {1} -tabcmd.content.description=A description for the item. -tabcmd.content.site_id=Used in the URL to uniquely identify the site. Different from the site name. +tabcmd.errors.parent.not.found=Containing project could not be found +tabcmd.errors.requires_resource_param=This command requires a resource path in a specific format. Value entered: {1}. Accepted values: {2}, {3}, {4} tabcmd.howto=Run a specific command +tabcmd.launching=Launching tabcmd +tabcmd.listing.header====== Listing {0} content for user {1}... 
+tabcmd.listing.label.name=\tNAME: {} +tabcmd.listing.label.id=ID: {} +tabcmd.listing.none=No content found + tabcmd.name=Tableau Server Command Line Utility +tabcmd.listing.short_description=List content items of a specified type +tabcmd.options.select_type=Select type of content to view +tabcmd.options.include_details=Show object details tabcmd.options.project=The name of the project tabcmd.options.parent_project=The path to the project that contains the target item. +tabcmd.options.delete_site.name=The name of the site to delete tabcmd.options.workbook=The name of the target workbook tabcmd.options.datasource=The name of the target data source -tabcmd.options.materializeviews=Adjust performance of this workbook by materializing calculations for all views, if enabled. -tabcmd.options.dematerialize=Adjust performance of this workbook by disable materialized calculations for all views, if enabled. tabcmd.options.complete=Require that all rows be valid for any change to succeed. (Defaults to true) -tabcmd.options.no_complete=Do not require that all rows be valid for any change to succeed. +tabcmd.options.no_complete=Do not require that all rows be valid for any change to succeed. tabcmd.options.users_file=CSV file that contains a list of users, one per line tabcmd.options.token_file=Read the Personal Access Token from a file. tabcmd.options.token_name=The name of your Personal Access Token. If using a token to sign in, this is required at least once to begin session. tabcmd.options.token_value=The secret value of your Personal Access Token to use for authentication. Requires --token-name to be set. -tabcmd.options.language.detailed=Set the language to use. Exported data will be returned in this lang/locale.\n If not set, the client will use your computer locale, and the server will use your user account locale -tabcmd.options.conflicts=Treat resource conflicts as item creation success e.g. 
project already exists +tabcmd.options.language.detailed=Set the language to use. Exported data will be returned in this lang/locale.\n \nIf not set, the client will use your computer locale, and the server will use your user account locale +tabcmd.options.conflicts=Treat resource conflicts as item creation success e.g. project already exists tabcmd.options.log=Use the specified logging level. The default level is INFO. - tabcmd.publish.options.restart=[Deprecated - has no effect] +tabcmd.options.deprecated=[Deprecated - has no effect] tabcmd.publish.options.tabbed.detailed=Publish with tabbed views enabled. Each sheet becomes a tab that viewers can use to navigate through the workbook. -tabcmd.publish.options.append.detailed=Append the data being published to an existing data source that has the same name. If append is set to true but a data source does not already exist, the operation fails. +tabcmd.publish.options.append.detailed=Append the data being published to an existing data source that has the same name. If append is set to true but a data source doesn''t already exist, the operation fails. + tabcmd.publish.options.disable-uploader=[Deprecated - has no effect] +tabcmd.errors.user_already_exists=A user with the name {0} already exists. Try a different name. tabcmd.refresh.options.bridge=Refresh datasource through Tableau Bridge +tabcmd.options.resource_url=The canonical name for the resource as it appears in the URL +tabcmdparser.errors.ambiguous_option=Ambiguous option ''{0}'' among options: {1} +tabcmdparser.warning.unexpected_dash=Unexpected dash detected in argument: {0}. Valid dashes are -, --. This error often occurs when copy/pasting from a text editor with formatting. Try re-typing the dash for the argument in the command line. 
+tabcmdparser.global.behaviors=Global behaviors: +tabcmdparser.global.connections=Global connections: +tabcmdparser.help.link=For more help see https://tableau.github.io/tabcmd/ +tabcmdparser.help.commands=List of Tabcmd commands +tabcmdparser.help.specific=For help on a specific command use 'tabcmd -h'. +tabcmdparser.help.description=Show message listing commands and global options, then exit + +version.description=Print version information