From 2141fa6c1dc18ed759b2c5fe3a67b7421685bbad Mon Sep 17 00:00:00 2001 From: lavianalon <130892560+lavianalon@users.noreply.github.com> Date: Mon, 10 Mar 2025 17:12:39 +0200 Subject: [PATCH] Update CLI reference to latest Update CLI v2 reference to latest including inference commands --- .../cli-reference/new-cli/runai_config.md | 1 + .../new-cli/runai_config_describe.md | 30 ++++++ .../new-cli/runai_config_generate.md | 14 ++- .../cli-reference/new-cli/runai_inference.md | 29 +++++ .../new-cli/runai_inference_delete.md | 43 ++++++++ .../new-cli/runai_inference_describe.md | 61 +++++++++++ .../new-cli/runai_inference_list.md | 56 ++++++++++ .../new-cli/runai_inference_submit.md | 100 ++++++++++++++++++ .../new-cli/runai_inference_update.md | 57 ++++++++++ .../new-cli/runai_mpi_describe.md | 1 + .../cli-reference/new-cli/runai_mpi_list.md | 4 +- .../cli-reference/new-cli/runai_mpi_logs.md | 2 +- .../cli-reference/new-cli/runai_mpi_submit.md | 12 ++- .../cli-reference/new-cli/runai_project.md | 2 + .../new-cli/runai_project_list.md | 12 ++- .../cli-reference/new-cli/runai_pvc.md | 26 +++++ .../cli-reference/new-cli/runai_pvc_list.md | 46 ++++++++ .../new-cli/runai_pytorch_describe.md | 1 + .../new-cli/runai_pytorch_list.md | 4 +- .../new-cli/runai_pytorch_logs.md | 2 +- .../new-cli/runai_pytorch_submit.md | 12 ++- .../new-cli/runai_tensorflow_describe.md | 1 + .../new-cli/runai_tensorflow_list.md | 4 +- .../new-cli/runai_tensorflow_logs.md | 2 +- .../new-cli/runai_tensorflow_submit.md | 14 +-- .../new-cli/runai_training_describe.md | 1 + .../new-cli/runai_training_list.md | 20 +++- .../new-cli/runai_training_logs.md | 2 +- .../new-cli/runai_training_mpi_describe.md | 1 + .../new-cli/runai_training_mpi_list.md | 4 +- .../new-cli/runai_training_mpi_logs.md | 2 +- .../new-cli/runai_training_mpi_submit.md | 12 ++- .../runai_training_pytorch_describe.md | 1 + .../new-cli/runai_training_pytorch_list.md | 4 +- .../new-cli/runai_training_pytorch_logs.md | 2 +- 
.../new-cli/runai_training_pytorch_submit.md | 12 ++- .../runai_training_standard_describe.md | 1 + .../new-cli/runai_training_standard_list.md | 4 +- .../new-cli/runai_training_standard_logs.md | 2 +- .../new-cli/runai_training_standard_submit.md | 12 ++- .../new-cli/runai_training_submit.md | 12 ++- .../runai_training_tensorflow_describe.md | 1 + .../new-cli/runai_training_tensorflow_list.md | 4 +- .../new-cli/runai_training_tensorflow_logs.md | 2 +- .../runai_training_tensorflow_submit.md | 14 +-- .../runai_training_xgboost_describe.md | 1 + .../new-cli/runai_training_xgboost_list.md | 4 +- .../new-cli/runai_training_xgboost_logs.md | 2 +- .../new-cli/runai_training_xgboost_submit.md | 12 ++- .../cli-reference/new-cli/runai_upgrade.md | 5 +- .../cli-reference/new-cli/runai_workload.md | 4 +- .../new-cli/runai_workload_delete.md | 48 +++++++++ .../new-cli/runai_workload_describe.md | 1 + .../new-cli/runai_workload_list.md | 2 +- .../new-cli/runai_workspace_describe.md | 1 + .../new-cli/runai_workspace_list.md | 4 +- .../new-cli/runai_workspace_logs.md | 2 +- .../new-cli/runai_workspace_submit.md | 12 ++- .../new-cli/runai_xgboost_describe.md | 1 + .../new-cli/runai_xgboost_list.md | 4 +- .../new-cli/runai_xgboost_logs.md | 2 +- .../new-cli/runai_xgboost_submit.md | 12 ++- 62 files changed, 661 insertions(+), 105 deletions(-) create mode 100644 docs/Researcher/cli-reference/new-cli/runai_config_describe.md create mode 100644 docs/Researcher/cli-reference/new-cli/runai_inference.md create mode 100644 docs/Researcher/cli-reference/new-cli/runai_inference_delete.md create mode 100644 docs/Researcher/cli-reference/new-cli/runai_inference_describe.md create mode 100644 docs/Researcher/cli-reference/new-cli/runai_inference_list.md create mode 100644 docs/Researcher/cli-reference/new-cli/runai_inference_submit.md create mode 100644 docs/Researcher/cli-reference/new-cli/runai_inference_update.md create mode 100644 docs/Researcher/cli-reference/new-cli/runai_pvc.md create 
mode 100644 docs/Researcher/cli-reference/new-cli/runai_pvc_list.md create mode 100644 docs/Researcher/cli-reference/new-cli/runai_workload_delete.md diff --git a/docs/Researcher/cli-reference/new-cli/runai_config.md b/docs/Researcher/cli-reference/new-cli/runai_config.md index f4eca862d4..194359c907 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_config.md +++ b/docs/Researcher/cli-reference/new-cli/runai_config.md @@ -26,6 +26,7 @@ runai config [flags] ### SEE ALSO * [runai](runai.md) - Run:ai Command-line Interface +* [runai config describe](runai_config_describe.md) - Returns information about the configuration. * [runai config generate](runai_config_generate.md) - generate config file * [runai config set](runai_config_set.md) - Set configuration values diff --git a/docs/Researcher/cli-reference/new-cli/runai_config_describe.md b/docs/Researcher/cli-reference/new-cli/runai_config_describe.md new file mode 100644 index 0000000000..d120b59818 --- /dev/null +++ b/docs/Researcher/cli-reference/new-cli/runai_config_describe.md @@ -0,0 +1,30 @@ +## runai config describe + +Returns information about the configuration. 
+ +``` +runai config describe [flags] +``` + +### Options + +``` + -h, --help help for describe + --json Output structure JSON + --yaml Output structure YAML +``` + +### Options inherited from parent commands + +``` + --config-file string config file name; can be set by environment variable RUNAI_CLI_CONFIG_FILE (default "config.json") + --config-path string config path; can be set by environment variable RUNAI_CLI_CONFIG_PATH + -d, --debug enable debug mode + -q, --quiet enable quiet mode, suppress all output except error messages + --verbose enable verbose mode +``` + +### SEE ALSO + +* [runai config](runai_config.md) - configuration management + diff --git a/docs/Researcher/cli-reference/new-cli/runai_config_generate.md b/docs/Researcher/cli-reference/new-cli/runai_config_generate.md index 05beab2188..495b3c659e 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_config_generate.md +++ b/docs/Researcher/cli-reference/new-cli/runai_config_generate.md @@ -6,13 +6,21 @@ generate config file runai config generate [flags] ``` +### Examples + +``` +# JSON configuration file +runai config generate -f path/to/config.json +# YAML configuration file +runai config generate -f path/to/config.yaml + +``` + ### Options ``` - --file string Output structure to file + -f, --file string file path for generating the configuration -h, --help help for generate - --json Output structure JSON - --yaml Output structure YAML ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_inference.md b/docs/Researcher/cli-reference/new-cli/runai_inference.md new file mode 100644 index 0000000000..1e8dbb45c0 --- /dev/null +++ b/docs/Researcher/cli-reference/new-cli/runai_inference.md @@ -0,0 +1,29 @@ +## runai inference + +inference management + +### Options + +``` + -h, --help help for inference +``` + +### Options inherited from parent commands + +``` + --config-file string config file name; can be set by environment variable 
RUNAI_CLI_CONFIG_FILE (default "config.json") + --config-path string config path; can be set by environment variable RUNAI_CLI_CONFIG_PATH + -d, --debug enable debug mode + -q, --quiet enable quiet mode, suppress all output except error messages + --verbose enable verbose mode +``` + +### SEE ALSO + +* [runai](runai.md) - Run:ai Command-line Interface +* [runai inference delete](runai_inference_delete.md) - delete inference workload +* [runai inference describe](runai_inference_describe.md) - describe inference +* [runai inference list](runai_inference_list.md) - list inference +* [runai inference submit](runai_inference_submit.md) - submit inference +* [runai inference update](runai_inference_update.md) - update inference + diff --git a/docs/Researcher/cli-reference/new-cli/runai_inference_delete.md b/docs/Researcher/cli-reference/new-cli/runai_inference_delete.md new file mode 100644 index 0000000000..821670bf7c --- /dev/null +++ b/docs/Researcher/cli-reference/new-cli/runai_inference_delete.md @@ -0,0 +1,43 @@ +## runai inference delete + +delete inference workload + +``` +runai inference delete [WORKLOAD_NAME] [flags] +``` + +### Examples + +``` +# Delete a inference workload with a default project +runai inference delete + +# Delete a inference workload with a specific project +runai inference delete -p + +# Delete a inference workload by UUID +runai inference delete --uuid= -p +``` + +### Options + +``` + -h, --help help for delete + -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + -u, --uuid string The UUID of the workload +``` + +### Options inherited from parent commands + +``` + --config-file string config file name; can be set by environment variable RUNAI_CLI_CONFIG_FILE (default "config.json") + --config-path string config path; can be set by environment variable RUNAI_CLI_CONFIG_PATH + -d, --debug enable debug mode + -q, --quiet enable quiet mode, suppress all output except error messages + --verbose enable verbose mode +``` + +### SEE ALSO + +* [runai inference](runai_inference.md) - inference management + diff --git a/docs/Researcher/cli-reference/new-cli/runai_inference_describe.md b/docs/Researcher/cli-reference/new-cli/runai_inference_describe.md new file mode 100644 index 0000000000..9f17ebabb4 --- /dev/null +++ b/docs/Researcher/cli-reference/new-cli/runai_inference_describe.md @@ -0,0 +1,61 @@ +## runai inference describe + +describe inference + +``` +runai inference describe [WORKLOAD_NAME] [flags] +``` + +### Examples + +``` +# Describe a inference workload with a default project +runai inference describe + +# Describe a inference workload in a specific project +runai inference describe -p + +# Describe a inference workload by UUID +runai inference describe --uuid= + +# Describe a inference workload with specific output format +runai inference describe -o json + +# Describe a inference workload with specific sections +runai inference describe --general --compute --pods --events --networks + +# Describe a inference workload with container details and custom limits +runai inference describe --containers --pod-limit 20 --event-limit 100 +``` + +### Options + +``` + --compute Show compute information (default true) + --containers Include container information in pods + --event-limit int32 Limit the number of events displayed (-1 for no limit) (default 50) + --events Show events information (default true) + --general Show general information (default true) + -h, --help 
help for describe + --networks Show networks information (default true) + -o, --output string Output format (table, json, yaml) (default "table") + --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) + --pods Show pods information (default true) + -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") +``` + +### Options inherited from parent commands + +``` + --config-file string config file name; can be set by environment variable RUNAI_CLI_CONFIG_FILE (default "config.json") + --config-path string config path; can be set by environment variable RUNAI_CLI_CONFIG_PATH + -d, --debug enable debug mode + -q, --quiet enable quiet mode, suppress all output except error messages + --verbose enable verbose mode +``` + +### SEE ALSO + +* [runai inference](runai_inference.md) - inference management + diff --git a/docs/Researcher/cli-reference/new-cli/runai_inference_list.md b/docs/Researcher/cli-reference/new-cli/runai_inference_list.md new file mode 100644 index 0000000000..0692bca132 --- /dev/null +++ b/docs/Researcher/cli-reference/new-cli/runai_inference_list.md @@ -0,0 +1,56 @@ +## runai inference list + +list inference + +``` +runai inference list [flags] +``` + +### Examples + +``` +# List all inference workloads +runai inference list -A + +# List inference workloads with default project +runai inference list + +# List inference workloads in a specific project +runai inference list -p + +# List all inference workloads with a specific output format +runai inference list --yaml + +# List inference workloads with pagination +runai inference list --limit 20 --offset 40 +``` + +### Options + +``` + -A, --all list workloads from all projects + -h, --help help for list + --json Output structure JSON + --limit int32 
the maximum number of entries to return (default 50) + --no-headers Output structure table without headers + --offset int32 offset number of limit, default 0 (first offset) + -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ + --status string filter by workload state + --table Output structure table + --yaml Output structure YAML +``` + +### Options inherited from parent commands + +``` + --config-file string config file name; can be set by environment variable RUNAI_CLI_CONFIG_FILE (default "config.json") + --config-path string config path; can be set by environment variable RUNAI_CLI_CONFIG_PATH + -d, --debug enable debug mode + -q, --quiet enable quiet mode, suppress all output except error messages + --verbose enable verbose mode +``` + +### SEE ALSO + +* [runai inference](runai_inference.md) - inference management + diff --git a/docs/Researcher/cli-reference/new-cli/runai_inference_submit.md b/docs/Researcher/cli-reference/new-cli/runai_inference_submit.md new file mode 100644 index 0000000000..75cb83e6dc --- /dev/null +++ b/docs/Researcher/cli-reference/new-cli/runai_inference_submit.md @@ -0,0 +1,100 @@ +## runai inference submit + +submit inference + +``` +runai inference submit [flags] +``` + +### Examples + +``` + +# Submit a inference workload with scale to zero +runai inference submit -p -i ubuntu --gpu-devices-request 1 +--serving-port=8000 --min-scale=0 --max-scale=1 + +# Submit a inference workload with autoscaling and authorization +runai inference submit -p -i ubuntu --gpu-devices-request 1 +--serving-port=container=8000,authorization-type=authorizedUsersOrGroups,authorized-users=user1:user2,protocol=http +--min-scale=1 --max-scale=4 --metric=concurrency --metric-threshold=100 +``` + +### Options + +``` + --activation-replicas int32 The number of replicas to run when scaling-up from zero. 
Defaults to minReplicas, or to 1 if minReplicas is set to 0 + --annotation stringArray Set of annotations to populate into the container running the workspace + --attach If true, wait for the pod to start running, and then attach to the pod as if 'runai attach' was called. Attach makes tty and stdin true by default. Defaults to false + --capability stringArray The POSIX capabilities to add when running containers. Defaults to the default set of capabilities granted by the container runtime. + -c, --command If true, override the image's entrypoint with the command supplied after '--' + --concurrency-hard-limit int32 The maximum number of requests allowed to flow to a single replica at any time. 0 means no limit + --configmap-map-volume stringArray Mount ConfigMap as a volume. Use the fhe format name=CONFIGMAP_NAME,path=PATH + --cpu-core-limit float CPU core limit (e.g. 0.5, 1) + --cpu-core-request float CPU core request (e.g. 0.5, 1) + --cpu-memory-limit string CPU memory limit to allocate for the job (e.g. 1G, 500M) + --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) + --create-home-dir Create a temporary home directory. Defaults to true when --run-as-user is set, false otherwise + -e, --environment stringArray Set environment variables in the container + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity + --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 + --git-sync stringArray Specifies git repositories to mount into the container. Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION + -g, --gpu-devices-request int32 GPU units to allocate for the job (e.g. 
1, 2) + --gpu-memory-limit string GPU memory limit to allocate for the job (e.g. 1G, 500M) + --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) + --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) + --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) + -h, --help help for submit + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + -i, --image string The image for the workload + --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") + --initial-replicas int32 The number of replicas to run when initializing the workload for the first time. Defaults to minReplicas, or to 1 if minReplicas is set to 0 + --initialization-timeout-seconds int32 The maximum amount of time (in seconds) to wait for the container to become ready + --label stringArray Set of labels to populate into the container running the workspace + --large-shm Request large /dev/shm device to mount + --max-replicas int32 The maximum number of replicas for autoscaling. Defaults to minReplicas, or to 1 if minReplicas is set to 0 + --metric string Autoscaling metric is required if minReplicas < maxReplicas, except when minReplicas = 0 and maxReplicas = 1. Use 'throughput', 'concurrency', 'latency', or custom metrics. + --metric-threshold int32 The threshold to use with the specified metric for autoscaling. Mandatory if metric is specified + --metric-threshold-percentage float32 The percentage of metric threshold value to use for autoscaling. Defaults to 70. Applicable only with the 'throughput' and 'concurrency' metrics + --min-replicas int32 The minimum number of replicas for autoscaling. Defaults to 1. 
Use 0 to allow scale-to-zero + --name-prefix string Set defined prefix for the workload name and add index as a suffix + --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral + --nfs stringArray NFS volumes to use in the workload. Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority + --node-type string Enforce node type affinity by setting a node-type label + --pod-running-timeout duration Pod check for running state timeout. + --port stringArray Expose ports from the job container. Use the format: service-type=NodePort,container=80,external=8080 + --preferred-pod-topology-key string If possible, all pods of this job will be scheduled onto nodes that have a label with this key and identical values + -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ + --required-pod-topology-key string Enforce scheduling pods of this job onto nodes that have a label with this key and identical values + --run-as-gid int The group ID the container will run with + --run-as-uid int The user ID the container will run with + --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields + --scale-down-delay-seconds int32 The minimum amount of time (in seconds) that a replica will remain active after a scale-down decision + --scale-to-zero-retention-seconds int32 The minimum amount of time (in seconds) that the last replica will remain active after a scale-to-zero decision. Defaults to 0. 
Available only if minReplicas is set to 0 + --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME + --serving-port string Defines various attributes for the serving port. Usage formats: (1) Simplified format: --serving-port=CONTAINER_PORT (2) Full format: --serving-port=container=CONTAINER_PORT,[authorization-type=public|authenticatedUsers|authorizedUsersOrGroups],[authorized-users=USER1:USER2...],[authorized-groups=GROUP1:GROUP2...],[cluster-local-access-only],[protocol=http|grpc] + --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --toleration stringArray Toleration details. Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] + --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken + --wait-for-submit duration Waiting duration for the workload to be created in the cluster. 
Defaults to 1 minute (1m) + --working-dir string Set the container's working directory +``` + +### Options inherited from parent commands + +``` + --config-file string config file name; can be set by environment variable RUNAI_CLI_CONFIG_FILE (default "config.json") + --config-path string config path; can be set by environment variable RUNAI_CLI_CONFIG_PATH + -d, --debug enable debug mode + -q, --quiet enable quiet mode, suppress all output except error messages + --verbose enable verbose mode +``` + +### SEE ALSO + +* [runai inference](runai_inference.md) - inference management + diff --git a/docs/Researcher/cli-reference/new-cli/runai_inference_update.md b/docs/Researcher/cli-reference/new-cli/runai_inference_update.md new file mode 100644 index 0000000000..4c7b270b97 --- /dev/null +++ b/docs/Researcher/cli-reference/new-cli/runai_inference_update.md @@ -0,0 +1,57 @@ +## runai inference update + +update inference + +``` +runai inference update [WORKLOAD_NAME] [flags] +``` + +### Examples + +``` +# Update a inference workload with a new image +runai inference update -p -i runai.jfrog.io/demo/quickstart-demo + +# Update a inference workload with a new autoscaling configuration +runai inference update -p --max-replicas=5 --min-replicas=3 --metric=latency --metric-threshold=10 + +``` + +### Options + +``` + --activation-replicas int32 The number of replicas to run when scaling-up from zero. Defaults to minReplicas, or to 1 if minReplicas is set to 0 + -c, --command If true, override the image's entrypoint with the command supplied after '--' + --concurrency-hard-limit int32 The maximum number of requests allowed to flow to a single replica at any time. 0 means no limit + --create-home-dir Create a temporary home directory. 
Defaults to true when --run-as-user is set, false otherwise + -e, --environment stringArray Set environment variables in the container + -h, --help help for update + -i, --image string The image for the workload + --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") + --initial-replicas int32 The number of replicas to run when initializing the workload for the first time. Defaults to minReplicas, or to 1 if minReplicas is set to 0 + --initialization-timeout-seconds int32 The maximum amount of time (in seconds) to wait for the container to become ready + --max-replicas int32 The maximum number of replicas for autoscaling. Defaults to minReplicas, or to 1 if minReplicas is set to 0 + --metric string Autoscaling metric is required if minReplicas < maxReplicas, except when minReplicas = 0 and maxReplicas = 1. Use 'throughput', 'concurrency', 'latency', or custom metrics. + --metric-threshold int32 The threshold to use with the specified metric for autoscaling. Mandatory if metric is specified + --metric-threshold-percentage float32 The percentage of metric threshold value to use for autoscaling. Defaults to 70. Applicable only with the 'throughput' and 'concurrency' metrics + --min-replicas int32 The minimum number of replicas for autoscaling. Defaults to 1. Use 0 to allow scale-to-zero + -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ + --scale-down-delay-seconds int32 The minimum amount of time (in seconds) that a replica will remain active after a scale-down decision + --scale-to-zero-retention-seconds int32 The minimum amount of time (in seconds) that the last replica will remain active after a scale-to-zero decision. Defaults to 0. 
Available only if minReplicas is set to 0 + --working-dir string Set the container's working directory +``` + +### Options inherited from parent commands + +``` + --config-file string config file name; can be set by environment variable RUNAI_CLI_CONFIG_FILE (default "config.json") + --config-path string config path; can be set by environment variable RUNAI_CLI_CONFIG_PATH + -d, --debug enable debug mode + -q, --quiet enable quiet mode, suppress all output except error messages + --verbose enable verbose mode +``` + +### SEE ALSO + +* [runai inference](runai_inference.md) - inference management + diff --git a/docs/Researcher/cli-reference/new-cli/runai_mpi_describe.md b/docs/Researcher/cli-reference/new-cli/runai_mpi_describe.md index 36be86ca2d..c901ed7c69 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_mpi_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_mpi_describe.md @@ -42,6 +42,7 @@ runai training mpi describe --containers --pod-limit 20 --event-limit --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_mpi_list.md b/docs/Researcher/cli-reference/new-cli/runai_mpi_list.md index b082207c4b..b5c1dff873 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_mpi_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_mpi_list.md @@ -19,7 +19,7 @@ runai training mpi list runai training mpi list -p # List all mpi training workloads with a specific output format -runai training mpi list -o wide +runai training mpi list --yaml # List mpi training workloads with pagination runai training mpi list --limit 20 --offset 40 @@ -31,7 +31,7 @@ runai training mpi list --limit 20 --offset 40 -A, --all list workloads from all projects -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_mpi_logs.md b/docs/Researcher/cli-reference/new-cli/runai_mpi_logs.md index 33845d6f35..18cd232675 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_mpi_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_mpi_logs.md @@ -56,7 +56,7 @@ runai training mpi logs mpi-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. 
Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_mpi_submit.md b/docs/Researcher/cli-reference/new-cli/runai_mpi_submit.md index c10770275b..2c71eaea03 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_mpi_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_mpi_submit.md @@ -46,7 +46,7 @@ runai training mpi submit -p -i ubuntu --master-command "e --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -55,11 +55,11 @@ runai training mpi submit -p -i ubuntu --master-command "e --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 
0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace @@ -71,7 +71,7 @@ runai training mpi submit -p -i ubuntu --master-command "e --master-no-pvcs Do not mount any persistent volumes in the master pod --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label --pod-running-timeout duration Pod check for running state timeout. 
@@ -82,11 +82,13 @@ runai training mpi submit -p -i ubuntu --master-command "e --run-as-gid int The group ID the container will run with --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME --slots-per-worker int32 Number of slots to allocate for each worker --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. 
Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken diff --git a/docs/Researcher/cli-reference/new-cli/runai_project.md b/docs/Researcher/cli-reference/new-cli/runai_project.md index 9a9a75c483..4e20e249fd 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_project.md +++ b/docs/Researcher/cli-reference/new-cli/runai_project.md @@ -11,6 +11,8 @@ runai project [flags] ``` -h, --help help for project --interactive enable set interactive mode (enabled|disabled) + --limit int32 the maximum number of entries to return (default 50) + --offset int32 offset number of limit, default 0 (first offset) ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_project_list.md b/docs/Researcher/cli-reference/new-cli/runai_project_list.md index 6a5608c1f6..0d09aa8a07 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_project_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_project_list.md @@ -9,11 +9,13 @@ runai project list [flags] ### Options ``` - -h, --help help for list - --json Output structure JSON - --no-headers Output structure table without headers - --table Output structure table - --yaml Output structure YAML + -h, --help help for list + --json Output structure JSON + --limit int32 the maximum number of entries to return (default 50) + --no-headers Output structure table without headers + --offset int32 offset number of limit, default 0 (first offset) + --table Output structure table + --yaml Output structure YAML ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_pvc.md b/docs/Researcher/cli-reference/new-cli/runai_pvc.md new file mode 100644 index 0000000000..3c03d64996 --- /dev/null +++ 
b/docs/Researcher/cli-reference/new-cli/runai_pvc.md @@ -0,0 +1,26 @@ +## runai pvc + +PVC management + +### Options + +``` + -h, --help help for pvc + --interactive enable set interactive mode (enabled|disabled) +``` + +### Options inherited from parent commands + +``` + --config-file string config file name; can be set by environment variable RUNAI_CLI_CONFIG_FILE (default "config.json") + --config-path string config path; can be set by environment variable RUNAI_CLI_CONFIG_PATH + -d, --debug enable debug mode + -q, --quiet enable quiet mode, suppress all output except error messages + --verbose enable verbose mode +``` + +### SEE ALSO + +* [runai](runai.md) - Run:ai Command-line Interface +* [runai pvc list](runai_pvc_list.md) - List PVC + diff --git a/docs/Researcher/cli-reference/new-cli/runai_pvc_list.md b/docs/Researcher/cli-reference/new-cli/runai_pvc_list.md new file mode 100644 index 0000000000..ac93acfcf0 --- /dev/null +++ b/docs/Researcher/cli-reference/new-cli/runai_pvc_list.md @@ -0,0 +1,46 @@ +## runai pvc list + +List PVC + +``` +runai pvc list [flags] +``` + +### Examples + +``` +# List of PVC by project with table output +runai pvc list -p + +# List of PVC by project with table JSON output +runai pvc list -p --json + +# List of PVC by project with YAML output +runai pvc list -p --yaml +``` + +### Options + +``` + -h, --help help for list + --json Output structure JSON + --no-headers Output structure table without headers + -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + --table Output structure table + --yaml Output structure YAML +``` + +### Options inherited from parent commands + +``` + --config-file string config file name; can be set by environment variable RUNAI_CLI_CONFIG_FILE (default "config.json") + --config-path string config path; can be set by environment variable RUNAI_CLI_CONFIG_PATH + -d, --debug enable debug mode + -q, --quiet enable quiet mode, suppress all output except error messages + --verbose enable verbose mode +``` + +### SEE ALSO + +* [runai pvc](runai_pvc.md) - PVC management + diff --git a/docs/Researcher/cli-reference/new-cli/runai_pytorch_describe.md b/docs/Researcher/cli-reference/new-cli/runai_pytorch_describe.md index 21414a218b..a45656a833 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_pytorch_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_pytorch_describe.md @@ -42,6 +42,7 @@ runai training pytorch describe --containers --pod-limit 20 --eve --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_pytorch_list.md b/docs/Researcher/cli-reference/new-cli/runai_pytorch_list.md index 53b9ad368d..8c03834c51 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_pytorch_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_pytorch_list.md @@ -19,7 +19,7 @@ runai training pytorch list runai training pytorch list -p # List all pytorch training workloads with a specific output format -runai training pytorch list -o wide +runai training pytorch list --yaml # List pytorch training workloads with pagination runai training pytorch list --limit 20 --offset 40 @@ -31,7 +31,7 @@ runai training pytorch list --limit 20 --offset 40 -A, --all list workloads from all projects -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_pytorch_logs.md b/docs/Researcher/cli-reference/new-cli/runai_pytorch_logs.md index 038eab822a..1592874a1b 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_pytorch_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_pytorch_logs.md @@ -56,7 +56,7 @@ runai training pytorch logs pytorch-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_pytorch_submit.md b/docs/Researcher/cli-reference/new-cli/runai_pytorch_submit.md index bf07428fc9..5ca1dc13a3 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_pytorch_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_pytorch_submit.md @@ -46,7 +46,7 @@ runai training pytorch submit -p -i ubuntu --master-comman --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. 
Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -55,11 +55,11 @@ runai training pytorch submit -p -i ubuntu --master-comman --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace @@ -76,7 +76,7 @@ runai training pytorch submit -p -i ubuntu --master-comman --min-replicas int32 Minimum number of replicas for an elastic PyTorch job --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. 
Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --no-master Do not create a separate pod for the master --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label @@ -88,10 +88,12 @@ runai training pytorch submit -p -i ubuntu --master-comman --run-as-gid int The group ID the container will run with --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. 
Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken diff --git a/docs/Researcher/cli-reference/new-cli/runai_tensorflow_describe.md b/docs/Researcher/cli-reference/new-cli/runai_tensorflow_describe.md index 1e8d75fb4e..d85c9b77ec 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_tensorflow_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_tensorflow_describe.md @@ -42,6 +42,7 @@ runai training tf describe --containers --pod-limit 20 --event-limit 1 --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_tensorflow_list.md b/docs/Researcher/cli-reference/new-cli/runai_tensorflow_list.md index 756fe5c99e..7b8b5e393b 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_tensorflow_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_tensorflow_list.md @@ -19,7 +19,7 @@ runai training tf list runai training tf list -p # List all tf training workloads with a specific output format -runai training tf list -o wide +runai training tf list --yaml # List tf training workloads with pagination runai training tf list --limit 20 --offset 40 @@ -31,7 +31,7 @@ runai training tf list --limit 20 --offset 40 -A, --all list workloads from all projects -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list 
(default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_tensorflow_logs.md b/docs/Researcher/cli-reference/new-cli/runai_tensorflow_logs.md index 552f77c0a3..b09e34d920 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_tensorflow_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_tensorflow_logs.md @@ -56,7 +56,7 @@ runai training tf logs tf-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_tensorflow_submit.md b/docs/Researcher/cli-reference/new-cli/runai_tensorflow_submit.md index 2062f58430..16cb4592fe 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_tensorflow_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_tensorflow_submit.md @@ -46,7 +46,7 @@ runai training tf submit -p -i ubuntu --master-command "ec --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. 
Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -55,11 +55,11 @@ runai training tf submit -p -i ubuntu --master-command "ec --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. 
Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace @@ -72,11 +72,9 @@ runai training tf submit -p -i ubuntu --master-command "ec --master-gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --master-gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) --master-no-pvcs Do not mount any persistent volumes in the master pod - --max-replicas int32 Maximum number of replicas for an elastic PyTorch job - --min-replicas int32 Minimum number of replicas for an elastic PyTorch job --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --no-master Do not create a separate pod for the master --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label @@ -88,10 +86,12 @@ runai training tf submit -p -i ubuntu --master-command "ec --run-as-gid int The group ID the container will run with --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. 
Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_describe.md b/docs/Researcher/cli-reference/new-cli/runai_training_describe.md index 62ff72a77f..2d5c1d27bc 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_describe.md @@ -42,6 +42,7 @@ runai training standard describe --containers --pod-limit 20 --e --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_list.md b/docs/Researcher/cli-reference/new-cli/runai_training_list.md index afbf2aae6e..0bd89c6683 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_list.md @@ -9,8 +9,24 @@ runai training list [flags] ### Examples ``` +# List all training workloads runai training list -A -runai training list --state= --limit=20 + +# List training workloads in a specific project +runai training list -p + +# List training workloads filtered by status +runai training list --status + +# List training workloads filtered by framework +runai training list --framework + +# List training workloads with a specific output format +runai training list --yaml + +# List training workloads with pagination +runai training list --limit 20 --offset 40 + ``` ### Options @@ -20,7 +36,7 @@ runai training list --state= --limit=20 --framework string filter by workload framework -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_logs.md b/docs/Researcher/cli-reference/new-cli/runai_training_logs.md index 60d1cfda8c..c3c8d91eb4 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_logs.md @@ -56,7 +56,7 @@ runai training standard logs standard-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_mpi_describe.md b/docs/Researcher/cli-reference/new-cli/runai_training_mpi_describe.md index 2f91992331..f2364e81eb 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_mpi_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_mpi_describe.md @@ -42,6 +42,7 @@ runai training mpi describe --containers --pod-limit 20 --event-limit --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_mpi_list.md b/docs/Researcher/cli-reference/new-cli/runai_training_mpi_list.md index 5dcced8957..1e89f4c769 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_mpi_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_mpi_list.md @@ -19,7 +19,7 @@ runai training mpi list runai training mpi list -p # List all mpi training workloads with a specific output format -runai training mpi list -o wide +runai training mpi list --yaml # List mpi training workloads with pagination runai training mpi list --limit 20 --offset 40 @@ -31,7 +31,7 @@ runai training mpi list --limit 20 --offset 40 -A, --all list workloads from all projects -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_mpi_logs.md b/docs/Researcher/cli-reference/new-cli/runai_training_mpi_logs.md index 63820b1afa..fb1f377cb5 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_mpi_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_mpi_logs.md @@ -56,7 +56,7 @@ runai training mpi logs mpi-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_mpi_submit.md b/docs/Researcher/cli-reference/new-cli/runai_training_mpi_submit.md index c6640c4768..803a82a81b 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_mpi_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_mpi_submit.md @@ -46,7 +46,7 @@ runai training mpi submit -p -i ubuntu --master-command "e --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. 
Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -55,11 +55,11 @@ runai training mpi submit -p -i ubuntu --master-command "e --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace @@ -71,7 +71,7 @@ runai training mpi submit -p -i ubuntu --master-command "e --master-no-pvcs Do not mount any persistent volumes in the master pod --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. 
Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label --pod-running-timeout duration Pod check for running state timeout. @@ -82,11 +82,13 @@ runai training mpi submit -p -i ubuntu --master-command "e --run-as-gid int The group ID the container will run with --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME --slots-per-worker int32 Number of slots to allocate for each worker --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. 
Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_describe.md b/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_describe.md index ad0df0a751..4bde2df516 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_describe.md @@ -42,6 +42,7 @@ runai training pytorch describe --containers --pod-limit 20 --eve --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_list.md b/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_list.md index 3cc93d908d..134b26faed 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_list.md @@ -19,7 +19,7 @@ runai training pytorch list runai training pytorch list -p # List all pytorch training workloads with a specific output format -runai training pytorch list -o wide +runai training pytorch list --yaml # List pytorch training workloads with pagination runai training pytorch list --limit 20 --offset 40 @@ -31,7 +31,7 @@ runai training pytorch list --limit 20 --offset 40 -A, --all list workloads from all projects -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_logs.md b/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_logs.md index 6a29f82ec7..c36339eb74 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_logs.md @@ -56,7 +56,7 @@ runai training pytorch logs pytorch-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_submit.md b/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_submit.md index bcdbc37154..09b1f5b1cb 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_pytorch_submit.md @@ -46,7 +46,7 @@ runai training pytorch submit -p -i ubuntu --master-comman --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. 
Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -55,11 +55,11 @@ runai training pytorch submit -p -i ubuntu --master-comman --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace @@ -76,7 +76,7 @@ runai training pytorch submit -p -i ubuntu --master-comman --min-replicas int32 Minimum number of replicas for an elastic PyTorch job --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. 
Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --no-master Do not create a separate pod for the master --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label @@ -88,10 +88,12 @@ runai training pytorch submit -p -i ubuntu --master-comman --run-as-gid int The group ID the container will run with --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. 
Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_standard_describe.md b/docs/Researcher/cli-reference/new-cli/runai_training_standard_describe.md index ac40d40b8e..3e06c3c160 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_standard_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_standard_describe.md @@ -42,6 +42,7 @@ runai training standard describe --containers --pod-limit 20 --e --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_standard_list.md b/docs/Researcher/cli-reference/new-cli/runai_training_standard_list.md index 4a28e07d88..d56dbc5608 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_standard_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_standard_list.md @@ -19,7 +19,7 @@ runai training standard list runai training standard list -p # List all standard training workloads with a specific output format -runai training standard list -o wide +runai training standard list --yaml # List standard training workloads with pagination runai training standard list --limit 20 --offset 40 @@ -31,7 +31,7 @@ runai training standard list --limit 20 --offset 40 -A, --all list workloads from all projects -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_standard_logs.md b/docs/Researcher/cli-reference/new-cli/runai_training_standard_logs.md index abb65e6de6..6b98759993 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_standard_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_standard_logs.md @@ -56,7 +56,7 @@ runai training standard logs standard-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. 
By default, commands apply to the default project. To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_standard_submit.md b/docs/Researcher/cli-reference/new-cli/runai_training_standard_submit.md index 13f03ff2d6..f447ff33f4 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_standard_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_standard_submit.md @@ -39,7 +39,7 @@ runai training standard submit -p -i jupyter/scipy-noteboo --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. 
Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -48,18 +48,18 @@ runai training standard submit -p -i jupyter/scipy-noteboo --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace --large-shm Request large /dev/shm device to mount --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. 
Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label --parallelism int32 Specifies the maximum number of pods that should run in parallel at any given time @@ -72,10 +72,12 @@ runai training standard submit -p -i jupyter/scipy-noteboo --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields --runs int32 Number of successful runs required for this workload to be considered completed - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. 
Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_submit.md b/docs/Researcher/cli-reference/new-cli/runai_training_submit.md index ad44b5c5b1..39d0a8ea69 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_submit.md @@ -39,7 +39,7 @@ runai training standard submit -p -i jupyter/scipy-noteboo --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -48,18 +48,18 @@ runai training standard submit -p -i jupyter/scipy-noteboo --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 
0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace --large-shm Request large /dev/shm device to mount --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. 
Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label --parallelism int32 Specifies the maximum number of pods that should run in parallel at any given time @@ -72,10 +72,12 @@ runai training standard submit -p -i jupyter/scipy-noteboo --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields --runs int32 Number of successful runs required for this workload to be considered completed - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. 
Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_describe.md b/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_describe.md index e8fc847363..1444777b57 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_describe.md @@ -42,6 +42,7 @@ runai training tf describe --containers --pod-limit 20 --event-limit 1 --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_list.md b/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_list.md index 0ad808f5fc..c0f1ff0a1d 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_list.md @@ -19,7 +19,7 @@ runai training tf list runai training tf list -p # List all tf training workloads with a specific output format -runai training tf list -o wide +runai training tf list --yaml # List tf training workloads with pagination runai training tf list --limit 20 --offset 40 @@ -31,7 +31,7 @@ runai training tf list --limit 20 --offset 40 -A, --all list workloads from all projects -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_logs.md b/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_logs.md index 341f4ee534..ee2b698a43 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_logs.md @@ -56,7 +56,7 @@ runai training tf logs tf-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_submit.md b/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_submit.md index a59eafc0c2..6c27b9a0fb 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_tensorflow_submit.md @@ -46,7 +46,7 @@ runai training tf submit -p -i ubuntu --master-command "ec --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. 
Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -55,11 +55,11 @@ runai training tf submit -p -i ubuntu --master-command "ec --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace @@ -72,11 +72,9 @@ runai training tf submit -p -i ubuntu --master-command "ec --master-gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --master-gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) --master-no-pvcs Do not mount any persistent volumes in the master pod - --max-replicas int32 Maximum number of replicas for an elastic PyTorch job - --min-replicas int32 Minimum number of replicas for an elastic PyTorch job --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. 
Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --no-master Do not create a separate pod for the master --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label @@ -88,10 +86,12 @@ runai training tf submit -p -i ubuntu --master-command "ec --run-as-gid int The group ID the container will run with --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. 
In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_describe.md b/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_describe.md index 17cd31dceb..81c929201d 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_describe.md @@ -42,6 +42,7 @@ runai training xgboost describe --containers --pod-limit 20 --eve --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_list.md b/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_list.md index 89f0cb773a..6fa336a6a5 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_list.md @@ -19,7 +19,7 @@ runai training xgboost list runai training xgboost list -p # List all xgboost training workloads with a specific output format -runai training xgboost list -o wide +runai training xgboost list --yaml # List xgboost training workloads with pagination runai training xgboost list --limit 20 --offset 40 @@ -31,7 +31,7 @@ runai training xgboost list --limit 20 --offset 40 -A, --all list workloads from all projects -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_logs.md b/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_logs.md index 191b947311..67e415169e 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_logs.md @@ -56,7 +56,7 @@ runai training xgboost logs xgboost-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_submit.md b/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_submit.md index d6dd4803b7..4eed8bc6cf 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_training_xgboost_submit.md @@ -46,7 +46,7 @@ runai training xgboost submit -p -i ubuntu --master-comman --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. 
Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -55,11 +55,11 @@ runai training xgboost submit -p -i ubuntu --master-comman --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace @@ -74,7 +74,7 @@ runai training xgboost submit -p -i ubuntu --master-comman --master-no-pvcs Do not mount any persistent volumes in the master pod --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. 
Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label --pod-running-timeout duration Pod check for running state timeout. @@ -85,10 +85,12 @@ runai training xgboost submit -p -i ubuntu --master-comman --run-as-gid int The group ID the container will run with --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. 
Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken diff --git a/docs/Researcher/cli-reference/new-cli/runai_upgrade.md b/docs/Researcher/cli-reference/new-cli/runai_upgrade.md index 62ed42eeee..5f74c77f42 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_upgrade.md +++ b/docs/Researcher/cli-reference/new-cli/runai_upgrade.md @@ -9,8 +9,9 @@ runai upgrade [flags] ### Options ``` - --force upgrade CLI without checking for new version - -h, --help help for upgrade + --exec string the command that will be used to run the upgrade (default "bash") + --force upgrade CLI without checking for new version + -h, --help help for upgrade ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_workload.md b/docs/Researcher/cli-reference/new-cli/runai_workload.md index d6e59e0de1..2b1f413c2a 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_workload.md +++ b/docs/Researcher/cli-reference/new-cli/runai_workload.md @@ -5,8 +5,7 @@ workload management ### Options ``` - -h, --help help for workload - --interactive enable set interactive mode (enabled|disabled) + -h, --help help for workload ``` ### Options inherited from parent commands @@ -22,6 +21,7 @@ workload management ### SEE ALSO * [runai](runai.md) - Run:ai Command-line Interface +* [runai workload delete](runai_workload_delete.md) - Delete workloads * [runai workload describe](runai_workload_describe.md) - Describe a workload * [runai workload list](runai_workload_list.md) - List workloads diff --git a/docs/Researcher/cli-reference/new-cli/runai_workload_delete.md b/docs/Researcher/cli-reference/new-cli/runai_workload_delete.md new file mode 100644 index 0000000000..cee99c093e --- /dev/null +++
b/docs/Researcher/cli-reference/new-cli/runai_workload_delete.md @@ -0,0 +1,48 @@ +## runai workload delete + +Delete workloads + +``` +runai workload delete [flags] +``` + +### Examples + +``` +# Delete multiple workloads +runai workload delete -p proj1 workload01 workload02 workload03 + +# Delete list of workloads with PyTorch framework filter +runai workload delete -p proj1 --framework pytorch workload01 workload02 workload03 + +# Delete list of workloads with training type filter +runai workload delete -p proj1 --type training workload01 workload02 workload03 + +# Delete multiple workloads by bypassing confirmation +runai workload delete -p proj1 -y workload01 workload02 workload03 +``` + +### Options + +``` + --framework string filter by workload framework + -h, --help help for delete + -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ + --type string filter by workload type + -y, --yes bypass confirmation dialog by answering yes +``` + +### Options inherited from parent commands + +``` + --config-file string config file name; can be set by environment variable RUNAI_CLI_CONFIG_FILE (default "config.json") + --config-path string config path; can be set by environment variable RUNAI_CLI_CONFIG_PATH + -d, --debug enable debug mode + -q, --quiet enable quiet mode, suppress all output except error messages + --verbose enable verbose mode +``` + +### SEE ALSO + +* [runai workload](runai_workload.md) - workload management + diff --git a/docs/Researcher/cli-reference/new-cli/runai_workload_describe.md b/docs/Researcher/cli-reference/new-cli/runai_workload_describe.md index b184eb07a0..56a6f6f1c4 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_workload_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_workload_describe.md @@ -21,6 +21,7 @@ runai workload describe WORKLOAD_NAME [flags] --pod-limit int32 Limit the number of pods displayed (-1 for no limit)
(default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") --type string The type of the workload (training, workspace) ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_workload_list.md b/docs/Researcher/cli-reference/new-cli/runai_workload_list.md index cbc7057c9b..71a6818b01 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_workload_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_workload_list.md @@ -13,7 +13,7 @@ runai workload list [flags] --framework string filter by workload framework -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_workspace_describe.md b/docs/Researcher/cli-reference/new-cli/runai_workspace_describe.md index d1a830819b..a570403899 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_workspace_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_workspace_describe.md @@ -42,6 +42,7 @@ runai workspace describe --containers --pod-limit 20 --event-li --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_workspace_list.md b/docs/Researcher/cli-reference/new-cli/runai_workspace_list.md index 71aadd5a63..33ff004d39 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_workspace_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_workspace_list.md @@ -19,7 +19,7 @@ runai workspace list runai workspace list -p # List all workspace workloads with a specific output format -runai workspace list -o wide +runai workspace list --yaml # List workspace workloads with pagination runai workspace list --limit 20 --offset 40 @@ -31,7 +31,7 @@ runai workspace list --limit 20 --offset 40 -A, --all list workloads from all projects -h, --help help for list --json Output structure JSON - --limit int32 number of workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_workspace_logs.md b/docs/Researcher/cli-reference/new-cli/runai_workspace_logs.md index da563403c4..f2eaef6988 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_workspace_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_workspace_logs.md @@ -56,7 +56,7 @@ runai workspace logs workspace-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. 
To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_workspace_submit.md b/docs/Researcher/cli-reference/new-cli/runai_workspace_submit.md index 154c0f646d..473af072fa 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_workspace_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_workspace_submit.md @@ -39,7 +39,7 @@ runai workspace submit -p -i jupyter/scipy-notebook --gpu- --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. 
Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -48,18 +48,18 @@ runai workspace submit -p -i jupyter/scipy-notebook --gpu- --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace --large-shm Request large /dev/shm device to mount --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. 
Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label --pod-running-timeout duration Pod check for running state timeout. @@ -71,10 +71,12 @@ runai workspace submit -p -i jupyter/scipy-notebook --gpu- --run-as-gid int The group ID the container will run with --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. Format: path=PATH,name=SECRET_RESOURCE_NAME --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. 
Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken diff --git a/docs/Researcher/cli-reference/new-cli/runai_xgboost_describe.md b/docs/Researcher/cli-reference/new-cli/runai_xgboost_describe.md index bba4c000d6..440b3c190a 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_xgboost_describe.md +++ b/docs/Researcher/cli-reference/new-cli/runai_xgboost_describe.md @@ -42,6 +42,7 @@ runai training xgboost describe --containers --pod-limit 20 --eve --pod-limit int32 Limit the number of pods displayed (-1 for no limit) (default 10) --pods Show pods information (default true) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ + --sortEvents string Sort the displayed events in ascending/descending order (asc, desc) (default "asc") ``` ### Options inherited from parent commands diff --git a/docs/Researcher/cli-reference/new-cli/runai_xgboost_list.md b/docs/Researcher/cli-reference/new-cli/runai_xgboost_list.md index ed1df40f92..3f8d1c3be9 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_xgboost_list.md +++ b/docs/Researcher/cli-reference/new-cli/runai_xgboost_list.md @@ -19,7 +19,7 @@ runai training xgboost list runai training xgboost list -p # List all xgboost training workloads with a specific output format -runai training xgboost list -o wide +runai training xgboost list --yaml # List xgboost training workloads with pagination runai training xgboost list --limit 20 --offset 40 @@ -31,7 +31,7 @@ runai training xgboost list --limit 20 --offset 40 -A, --all list workloads from all projects -h, --help help for list --json Output structure JSON - --limit int32 number of 
workload in list (default 50) + --limit int32 the maximum number of entries to return (default 50) --no-headers Output structure table without headers --offset int32 offset number of limit, default 0 (first offset) -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ diff --git a/docs/Researcher/cli-reference/new-cli/runai_xgboost_logs.md b/docs/Researcher/cli-reference/new-cli/runai_xgboost_logs.md index 092fd083db..c00acbebee 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_xgboost_logs.md +++ b/docs/Researcher/cli-reference/new-cli/runai_xgboost_logs.md @@ -56,7 +56,7 @@ runai training xgboost logs xgboost-01 --wait-timeout=30s -p, --project string Specify the project to which the command applies. By default, commands apply to the default project. To change the default project use ‘runai config project ’ --since duration Return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs --since-time string Return logs after a specific date (RFC3339) - -t, --tail int Numer of tailed lines to fetch from the log, for no limit set to -1 (default -1) + -t, --tail int Number of tailed lines to fetch from the log, for no limit set to -1 (default -1) --timestamps Show timestamps in log output --wait-timeout duration Timeout for waiting for workload to be ready for log streaming ``` diff --git a/docs/Researcher/cli-reference/new-cli/runai_xgboost_submit.md b/docs/Researcher/cli-reference/new-cli/runai_xgboost_submit.md index 2e2844a95e..22a349c7f0 100644 --- a/docs/Researcher/cli-reference/new-cli/runai_xgboost_submit.md +++ b/docs/Researcher/cli-reference/new-cli/runai_xgboost_submit.md @@ -46,7 +46,7 @@ runai training xgboost submit -p -i ubuntu --master-comman --cpu-memory-request string CPU memory to allocate for the job (e.g. 1G, 500M) --create-home-dir Create a temporary home directory. 
Defaults to true when --run-as-user is set, false otherwise -e, --environment stringArray Set environment variables in the container - --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH + --existing-pvc stringArray Mount an existing persistent volume. Use the format: claimname=CLAIM_NAME,path=PATH --extended-resource stringArray Request access to an extended resource. Use the format: resource_name=quantity --external-url stringArray Expose URL from the job container. Use the format: container=9443,url=https://external.runai.com,authusers=user1,authgroups=group1 --git-sync stringArray Specifies git repositories to mount into the container. Use the format: name=NAME,repository=REPO,path=PATH,secret=SECRET,rev=REVISION @@ -55,11 +55,11 @@ runai training xgboost submit -p -i ubuntu --master-comman --gpu-memory-request string GPU memory to allocate for the job (e.g. 1G, 500M) --gpu-portion-limit float GPU portion limit, must be no less than the gpu-memory-request (between 0 and 1, e.g. 0.5, 0.2) --gpu-portion-request float GPU portion request (between 0 and 1, e.g. 0.5, 0.2) - --gpu-request-type string GPU request type (portion|memory|migProfile) + --gpu-request-type string GPU request type (portion|memory|migProfile[Deprecated]) -h, --help help for submit --host-ipc Whether to enable host IPC. (Default: false) --host-network Whether to enable host networking. (Default: false) - --host-path stringArray Volumes to mount into the container. Use the format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite + --host-path stringArray host paths (Volumes) to mount into the container. Format: path=PATH,mount=MOUNT,mount-propagation=None|HostToContainer,readwrite -i, --image string The image for the workload --image-pull-policy string Set image pull policy. One of: Always, IfNotPresent, Never. 
Defaults to Always (default "Always") --label stringArray Set of labels to populate into the container running the workspace @@ -74,7 +74,7 @@ runai training xgboost submit -p -i ubuntu --master-comman --master-no-pvcs Do not mount any persistent volumes in the master pod --name-prefix string Set defined prefix for the workload name and add index as a suffix --new-pvc stringArray Mount a persistent volume, create it if it does not exist. Use the format: claimname=CLAIM_NAME,storageclass=STORAGE_CLASS,size=SIZE,path=PATH,accessmode-rwo,accessmode-rom,accessmode-rwm,ro,ephemeral - --nfs stringArray NFS storage details. Use the format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite + --nfs stringArray NFS volumes to use in the workload. Format: path=PATH,server=SERVER,mountpath=MOUNT_PATH,readwrite --node-pools stringArray List of node pools to use for scheduling the job, ordered by priority --node-type string Enforce node type affinity by setting a node-type label --pod-running-timeout duration Pod check for running state timeout. @@ -85,10 +85,12 @@ runai training xgboost submit -p -i ubuntu --master-comman --run-as-gid int The group ID the container will run with --run-as-uid int The user ID the container will run with --run-as-user takes the uid, gid, and supplementary groups fields from the token, if all the fields do not exist, uses the local running terminal user credentials. if any of the fields exist take only the existing fields - --s3 stringArray s3 storage details. Use the format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL + --s3 stringArray s3 buckets to use in the workload. Format: name=NAME,bucket=BUCKET,path=PATH,accesskey=ACCESS_KEY,url=URL --seccomp-profile string Indicates which kind of seccomp profile will be applied to the container, options: RuntimeDefault|Unconfined|Localhost + --secret-volume stringArray Secret volumes to use in the workload. 
Format: path=PATH,name=SECRET_RESOURCE_NAME --stdin Keep stdin open on the container(s) in the pod, even if nothing is attached --supplemental-groups ints Comma seperated list of groups (IDs) that the user running the container belongs to + --termination-grace-period duration The length of time (like 5s or 2m, higher than zero) the workload's pod is expected to terminate gracefully upon probe failure. In case value is not specified, kubernetes default of 30 seconds applies (default 0s) --toleration stringArray Toleration details. Use the format: operator=Equal|Exists,key=KEY,[value=VALUE],[effect=NoSchedule|NoExecute|PreferNoSchedule],[seconds=SECONDS] -t, --tty Allocate a TTY for the container --user-group-source string Indicate the way to determine the user and group ids of the container, options: fromTheImage|fromIdpToken|fromIdpToken