diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index ba9795103..80120a0c0 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -1774,7 +1774,200 @@ async def nodes( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + 
"query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -1801,16 +1994,17 @@ async def nodes( to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index be758d06f..b83fe4de2 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -1246,7 +1246,8 @@ async def delete_template( """ .. raw:: html -

Delete a legacy index template.

+

Delete a legacy index template. + IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
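As an illustration, a minimal deletion sketch with the async Python client (the template name is a hypothetical placeholder):

    import asyncio

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")

    async def drop_legacy_template() -> None:
        # Deletes the legacy template by exact name; a missing name raises NotFoundError.
        await client.indices.delete_template(name="template_1")

    asyncio.run(drop_legacy_template())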

``_ @@ -2878,7 +2879,7 @@ async def get_template( """ .. raw:: html -

Get index templates. +

Get legacy index templates. Get information about one or more index templates.

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
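A minimal retrieval sketch with the async Python client (the template name is hypothetical; simple wildcards such as temp* are also accepted):

    import asyncio

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")

    async def show_legacy_templates() -> None:
        # The response maps each matching template name to its definition.
        resp = await client.indices.get_template(name="template_1")
        for name, body in resp.items():
            print(name, body["index_patterns"])

    asyncio.run(show_legacy_templates())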

@@ -3850,8 +3851,34 @@ async def put_settings( Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default.

To revert a setting to the default value, use a null value. - The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. + The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To prevent existing settings from being updated, set the preserve_existing parameter to true.

+

There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:

+
{
+            "number_of_replicas": 1
+          }
+          
+

Or you can use an index setting object:

+
{
+            "index": {
+              "number_of_replicas": 1
+            }
+          }
+          
+

Or you can use dot notation:

+
{
+            "index.number_of_replicas": 1
+          }
+          
+

Or you can embed any of the aforementioned options in a settings object. For example:

+
{
+            "settings": {
+              "index": {
+                "number_of_replicas": 1
+              }
+            }
+          }
+          
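All four request bodies shown above are equivalent. A minimal sketch with the async Python client (the index name is hypothetical); each call performs the same dynamic update:

    import asyncio

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")

    async def set_replicas() -> None:
        # Flat key, dot notation, and nested object forms are interchangeable.
        await client.indices.put_settings(index="my-index", settings={"number_of_replicas": 1})
        await client.indices.put_settings(index="my-index", settings={"index.number_of_replicas": 1})
        await client.indices.put_settings(index="my-index", settings={"index": {"number_of_replicas": 1}})

    asyncio.run(set_replicas())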

NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. @@ -3971,7 +3998,7 @@ async def put_template( """ .. raw:: html -

Create or update an index template. +

Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
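A minimal creation sketch with the async Python client (the pattern, settings, and mappings are illustrative assumptions):

    import asyncio

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")

    async def create_legacy_template() -> None:
        # Applied automatically to any new index whose name matches "logs-*".
        await client.indices.put_template(
            name="logs-template",
            index_patterns=["logs-*"],
            settings={"number_of_shards": 1},
            mappings={"properties": {"@timestamp": {"type": "date"}}},
        )

    asyncio.run(create_legacy_template())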

diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 7d90246e5..eef722b8e 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -370,12 +370,7 @@ async def put( """ .. raw:: html -

Create an inference endpoint. - When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+

Create an inference endpoint.

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
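A minimal sketch of the generic creation call with the async Python client; the service and model ID below are illustrative assumptions, not the only valid choices:

    import asyncio

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")

    async def create_inference_endpoint() -> None:
        # task_type and inference_id form the request path; the body selects the service.
        await client.inference.put(
            task_type="text_embedding",
            inference_id="my-embeddings",
            inference_config={
                "service": "elasticsearch",
                "service_settings": {
                    "model_id": ".multilingual-e5-small",
                    "num_allocations": 1,
                    "num_threads": 1,
                },
            },
        )

    asyncio.run(create_inference_endpoint())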

@@ -458,11 +453,6 @@ async def put_alibabacloud(

Create an AlibabaCloud AI Search inference endpoint.

Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

-

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -558,11 +548,6 @@ async def put_amazonbedrock(

INFO: You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.

-

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -654,11 +639,6 @@ async def put_anthropic(

Create an Anthropic inference endpoint.

Create an inference endpoint to perform an inference task with the anthropic service.

-

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -751,11 +731,6 @@ async def put_azureaistudio(

Create an Azure AI Studio inference endpoint.

Create an inference endpoint to perform an inference task with the azureaistudio service.

-

When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ @@ -853,11 +828,6 @@ async def put_azureopenai(
  • GPT-3.5
  • The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -951,11 +921,6 @@ async def put_cohere(

    Create a Cohere inference endpoint.

    Create an inference endpoint to perform an inference task with the cohere service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1239,11 +1204,6 @@ async def put_googleaistudio(

Create a Google AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the googleaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1331,11 +1291,6 @@ async def put_googlevertexai(

    Create a Google Vertex AI inference endpoint.

    Create an inference endpoint to perform an inference task with the googlevertexai service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1434,11 +1389,6 @@ async def put_hugging_face(
  • multilingual-e5-base
  • multilingual-e5-small
-

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1528,11 +1478,6 @@ async def put_jinaai(

    Create an inference endpoint to perform an inference task with the jinaai service.

To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1616,11 +1561,6 @@ async def put_mistral(

    Create a Mistral inference endpoint.

Create an inference endpoint to perform an inference task with the mistral service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1709,11 +1649,6 @@ async def put_openai(

    Create an OpenAI inference endpoint.

    Create an inference endpoint to perform an inference task with the openai service or openai compatible APIs.
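For example, a minimal sketch with the async Python client (the API key placeholder and model ID are assumptions):

    import asyncio

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")

    async def create_openai_endpoint() -> None:
        # service_settings is passed through to the openai service unchanged.
        await client.inference.put_openai(
            task_type="text_embedding",
            openai_inference_id="my-openai-embeddings",
            service="openai",
            service_settings={
                "api_key": "<OPENAI_API_KEY>",
                "model_id": "text-embedding-3-small",
            },
        )

    asyncio.run(create_openai_endpoint())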

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1890,11 +1825,6 @@ async def put_watsonx(

    Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 6389cd70d..efd9a60c5 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -1774,7 +1774,200 @@ def nodes( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + 
"query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -1801,16 +1994,17 @@ def nodes( to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 625e4a89f..13c824092 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -1246,7 +1246,8 @@ def delete_template( """ .. raw:: html -

    Delete a legacy index template.

    +

    Delete a legacy index template. + IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    ``_ @@ -2878,7 +2879,7 @@ def get_template( """ .. raw:: html -

    Get index templates. +

    Get legacy index templates. Get information about one or more index templates.

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    @@ -3850,8 +3851,34 @@ def put_settings( Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default.

To revert a setting to the default value, use a null value. - The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. + The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To prevent existing settings from being updated, set the preserve_existing parameter to true.

    +

    There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:

    +
    {
    +            "number_of_replicas": 1
    +          }
    +          
    +

    Or you can use an index setting object:

    +
    {
    +            "index": {
    +              "number_of_replicas": 1
    +            }
    +          }
    +          
    +

Or you can use dot notation:

    +
    {
    +            "index.number_of_replicas": 1
    +          }
    +          
    +

    Or you can embed any of the aforementioned options in a settings object. For example:

    +
    {
    +            "settings": {
    +              "index": {
    +                "number_of_replicas": 1
    +              }
    +            }
    +          }
    +          
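A short sketch of the preserve_existing behavior mentioned above, using the sync Python client (the index name and setting are hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # With preserve_existing=True, settings that already have a value on the
    # index are left untouched; only settings that are currently unset are written.
    client.indices.put_settings(
        index="my-index",
        settings={"index": {"refresh_interval": "30s"}},
        preserve_existing=True,
    )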

    NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. @@ -3971,7 +3998,7 @@ def put_template( """ .. raw:: html -

    Create or update an index template. +

    Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index e77ad84f0..318daab00 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -370,12 +370,7 @@ def put( """ .. raw:: html -

    Create an inference endpoint. - When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

    Create an inference endpoint.

    IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

    @@ -458,11 +453,6 @@ def put_alibabacloud(

    Create an AlibabaCloud AI Search inference endpoint.

    Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -558,11 +548,6 @@ def put_amazonbedrock(

INFO: You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -654,11 +639,6 @@ def put_anthropic(

    Create an Anthropic inference endpoint.

    Create an inference endpoint to perform an inference task with the anthropic service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -751,11 +731,6 @@ def put_azureaistudio(

Create an Azure AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the azureaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -853,11 +828,6 @@ def put_azureopenai(
  • GPT-3.5
  • The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -951,11 +921,6 @@ def put_cohere(

    Create a Cohere inference endpoint.

    Create an inference endpoint to perform an inference task with the cohere service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1239,11 +1204,6 @@ def put_googleaistudio(

Create a Google AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the googleaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1331,11 +1291,6 @@ def put_googlevertexai(

    Create a Google Vertex AI inference endpoint.

    Create an inference endpoint to perform an inference task with the googlevertexai service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1434,11 +1389,6 @@ def put_hugging_face(
  • multilingual-e5-base
  • multilingual-e5-small
-

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1528,11 +1478,6 @@ def put_jinaai(

    Create an inference endpoint to perform an inference task with the jinaai service.

To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1616,11 +1561,6 @@ def put_mistral(

    Create a Mistral inference endpoint.

Create an inference endpoint to perform an inference task with the mistral service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1709,11 +1649,6 @@ def put_openai(

    Create an OpenAI inference endpoint.

    Create an inference endpoint to perform an inference task with the openai service or openai compatible APIs.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ @@ -1890,11 +1825,6 @@ def put_watsonx(

    Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    ``_ diff --git a/elasticsearch/dsl/field.py b/elasticsearch/dsl/field.py index e3ed5dfcd..8c0d57592 100644 --- a/elasticsearch/dsl/field.py +++ b/elasticsearch/dsl/field.py @@ -3689,11 +3689,6 @@ class SemanticText(Field): by using the Update mapping API. Use the Create inference API to create the endpoint. If not specified, the inference endpoint defined by inference_id will be used at both index and query time. - :arg chunking_settings: Settings for chunking text into smaller - passages. If specified, these will override the chunking settings - sent in the inference endpoint associated with inference_id. If - chunking settings are updated, they will not be applied to - existing documents until they are reindexed. """ name = "semantic_text" @@ -3704,9 +3699,6 @@ def __init__( meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, inference_id: Union[str, "DefaultType"] = DEFAULT, search_inference_id: Union[str, "DefaultType"] = DEFAULT, - chunking_settings: Union[ - "types.ChunkingSettings", Dict[str, Any], "DefaultType" - ] = DEFAULT, **kwargs: Any, ): if meta is not DEFAULT: @@ -3715,8 +3707,6 @@ def __init__( kwargs["inference_id"] = inference_id if search_inference_id is not DEFAULT: kwargs["search_inference_id"] = search_inference_id - if chunking_settings is not DEFAULT: - kwargs["chunking_settings"] = chunking_settings super().__init__(*args, **kwargs) diff --git a/elasticsearch/dsl/query.py b/elasticsearch/dsl/query.py index 720f49f78..f203332e4 100644 --- a/elasticsearch/dsl/query.py +++ b/elasticsearch/dsl/query.py @@ -2034,8 +2034,9 @@ def __init__( class Rule(Query): """ :arg organic: (required) - :arg ruleset_ids: (required) :arg match_criteria: (required) + :arg ruleset_ids: + :arg ruleset_id: :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases @@ -2053,16 +2054,18 @@ def __init__( self, *, organic: Union[Query, "DefaultType"] = DEFAULT, - ruleset_ids: Union[Sequence[str], "DefaultType"] = DEFAULT, match_criteria: Any = DEFAULT, + ruleset_ids: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + ruleset_id: Union[str, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( organic=organic, - ruleset_ids=ruleset_ids, match_criteria=match_criteria, + ruleset_ids=ruleset_ids, + ruleset_id=ruleset_id, boost=boost, _name=_name, **kwargs, diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index d21f70698..5d7b88b32 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -170,48 +170,6 @@ def __init__( super().__init__(kwargs) -class ChunkingSettings(AttrDict[Any]): - """ - :arg strategy: (required) The chunking strategy: `sentence` or `word`. - Defaults to `sentence` if omitted. - :arg max_chunk_size: (required) The maximum size of a chunk in words. - This value cannot be higher than `300` or lower than `20` (for - `sentence` strategy) or `10` (for `word` strategy). Defaults to - `250` if omitted. - :arg overlap: The number of overlapping words for chunks. It is - applicable only to a `word` chunking strategy. This value cannot - be higher than half the `max_chunk_size` value. Defaults to `100` - if omitted. - :arg sentence_overlap: The number of overlapping sentences for chunks. - It is applicable only for a `sentence` chunking strategy. It can - be either `1` or `0`. Defaults to `1` if omitted. 
- """ - - strategy: Union[str, DefaultType] - max_chunk_size: Union[int, DefaultType] - overlap: Union[int, DefaultType] - sentence_overlap: Union[int, DefaultType] - - def __init__( - self, - *, - strategy: Union[str, DefaultType] = DEFAULT, - max_chunk_size: Union[int, DefaultType] = DEFAULT, - overlap: Union[int, DefaultType] = DEFAULT, - sentence_overlap: Union[int, DefaultType] = DEFAULT, - **kwargs: Any, - ): - if strategy is not DEFAULT: - kwargs["strategy"] = strategy - if max_chunk_size is not DEFAULT: - kwargs["max_chunk_size"] = max_chunk_size - if overlap is not DEFAULT: - kwargs["overlap"] = overlap - if sentence_overlap is not DEFAULT: - kwargs["sentence_overlap"] = sentence_overlap - super().__init__(kwargs) - - class ClassificationInferenceOptions(AttrDict[Any]): """ :arg num_top_classes: Specifies the number of top class predictions to @@ -969,7 +927,7 @@ def __init__( class GeoGridQuery(AttrDict[Any]): """ - :arg geogrid: + :arg geotile: :arg geohash: :arg geohex: :arg boost: Floating point number used to decrease or increase the @@ -980,7 +938,7 @@ class GeoGridQuery(AttrDict[Any]): :arg _name: """ - geogrid: Union[str, DefaultType] + geotile: Union[str, DefaultType] geohash: Union[str, DefaultType] geohex: Union[str, DefaultType] boost: Union[float, DefaultType] @@ -989,15 +947,15 @@ class GeoGridQuery(AttrDict[Any]): def __init__( self, *, - geogrid: Union[str, DefaultType] = DEFAULT, + geotile: Union[str, DefaultType] = DEFAULT, geohash: Union[str, DefaultType] = DEFAULT, geohex: Union[str, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): - if geogrid is not DEFAULT: - kwargs["geogrid"] = geogrid + if geotile is not DEFAULT: + kwargs["geotile"] = geotile if geohash is not DEFAULT: kwargs["geohash"] = geohash if geohex is not DEFAULT: @@ -1823,6 +1781,8 @@ class IntervalsContainer(AttrDict[Any]): :arg match: Matches analyzed text. :arg prefix: Matches terms that start with a specified set of characters. + :arg range: + :arg regexp: :arg wildcard: Matches terms using a wildcard pattern. """ @@ -1831,6 +1791,8 @@ class IntervalsContainer(AttrDict[Any]): fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] match: Union["IntervalsMatch", Dict[str, Any], DefaultType] prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] + range: Union["IntervalsRange", Dict[str, Any], DefaultType] + regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] def __init__( @@ -1841,6 +1803,8 @@ def __init__( fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT, match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT, prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT, + range: Union["IntervalsRange", Dict[str, Any], DefaultType] = DEFAULT, + regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] = DEFAULT, wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): @@ -1854,6 +1818,10 @@ def __init__( kwargs["match"] = match if prefix is not DEFAULT: kwargs["prefix"] = prefix + if range is not DEFAULT: + kwargs["range"] = range + if regexp is not DEFAULT: + kwargs["regexp"] = regexp if wildcard is not DEFAULT: kwargs["wildcard"] = wildcard super().__init__(kwargs) @@ -2074,6 +2042,8 @@ class IntervalsQuery(AttrDict[Any]): :arg match: Matches analyzed text. :arg prefix: Matches terms that start with a specified set of characters. 
+ :arg range: + :arg regexp: :arg wildcard: Matches terms using a wildcard pattern. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the @@ -2088,6 +2058,8 @@ class IntervalsQuery(AttrDict[Any]): fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] match: Union["IntervalsMatch", Dict[str, Any], DefaultType] prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] + range: Union["IntervalsRange", Dict[str, Any], DefaultType] + regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] @@ -2100,6 +2072,8 @@ def __init__( fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT, match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT, prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT, + range: Union["IntervalsRange", Dict[str, Any], DefaultType] = DEFAULT, + regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] = DEFAULT, wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, @@ -2115,6 +2089,10 @@ def __init__( kwargs["match"] = match if prefix is not DEFAULT: kwargs["prefix"] = prefix + if range is not DEFAULT: + kwargs["range"] = range + if regexp is not DEFAULT: + kwargs["regexp"] = regexp if wildcard is not DEFAULT: kwargs["wildcard"] = wildcard if boost is not DEFAULT: @@ -2124,6 +2102,83 @@ def __init__( super().__init__(kwargs) +class IntervalsRange(AttrDict[Any]): + """ + :arg analyzer: Analyzer used to analyze the `prefix`. + :arg gte: Lower term, either gte or gt must be provided. + :arg gt: Lower term, either gte or gt must be provided. + :arg lte: Upper term, either lte or lt must be provided. + :arg lt: Upper term, either lte or lt must be provided. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `prefix` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. + """ + + analyzer: Union[str, DefaultType] + gte: Union[str, DefaultType] + gt: Union[str, DefaultType] + lte: Union[str, DefaultType] + lt: Union[str, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + analyzer: Union[str, DefaultType] = DEFAULT, + gte: Union[str, DefaultType] = DEFAULT, + gt: Union[str, DefaultType] = DEFAULT, + lte: Union[str, DefaultType] = DEFAULT, + lt: Union[str, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if gte is not DEFAULT: + kwargs["gte"] = gte + if gt is not DEFAULT: + kwargs["gt"] = gt + if lte is not DEFAULT: + kwargs["lte"] = lte + if lt is not DEFAULT: + kwargs["lt"] = lt + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + super().__init__(kwargs) + + +class IntervalsRegexp(AttrDict[Any]): + """ + :arg pattern: (required) Regex pattern. + :arg analyzer: Analyzer used to analyze the `prefix`. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `prefix` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. 
+ """ + + pattern: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + pattern: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if pattern is not DEFAULT: + kwargs["pattern"] = pattern + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + super().__init__(kwargs) + + class IntervalsWildcard(AttrDict[Any]): """ :arg pattern: (required) Wildcard pattern used to find matching terms. @@ -4828,7 +4883,7 @@ class ErrorCause(AttrDict[Any]): """ type: str - reason: str + reason: Union[str, None] stack_trace: str caused_by: "ErrorCause" root_cause: Sequence["ErrorCause"]