diff --git a/.gemini/styleguide.md b/.gemini/styleguide.md
index f271f74cb4..13b38e856f 100644
--- a/.gemini/styleguide.md
+++ b/.gemini/styleguide.md
@@ -101,10 +101,10 @@ def render_header(header: str) -> str:
Renders a (markdown) heading.
Args:
- header (str): header
+ header: header
Returns:
- str: The rendered header
+ The rendered header
"""
return f"{header}\n{'=' * len(header)}\n"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4ad3a87c28..876744fa7e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -65,3 +65,6 @@ repos:
- --quiet # otherwise prints all checked files that are ok...
- --skip-checking-raises=true
- --check-class-attributes=false
+ - --arg-type-hints-in-docstring=False
+ - --check-return-types=False
+ - --check-yield-types=False
\ No newline at end of file
diff --git a/cognite/client/_api/agents/agents.py b/cognite/client/_api/agents/agents.py
index 0a277bfbb7..28d633dd9d 100644
--- a/cognite/client/_api/agents/agents.py
+++ b/cognite/client/_api/agents/agents.py
@@ -40,10 +40,10 @@ async def upsert(self, agents: AgentUpsert | Sequence[AgentUpsert]) -> Agent | A
"""`Create or update (upsert) one or more agents. `_
Args:
- agents (AgentUpsert | Sequence[AgentUpsert]): Agent or list of agents to create or update.
+ agents: Agent or list of agents to create or update.
Returns:
- Agent | AgentList: The created or updated agent(s).
+ The created or updated agent(s).
Examples:
@@ -178,11 +178,11 @@ async def retrieve(
"""`Retrieve one or more agents by external ID. `_
Args:
- external_ids (str | SequenceNotStr[str]): The external id of the agent(s) to retrieve.
- ignore_unknown_ids (bool): Whether to ignore unknown IDs. Defaults to False.
+ external_ids: The external id of the agent(s) to retrieve.
+ ignore_unknown_ids: Whether to ignore unknown IDs. Defaults to False.
Returns:
- Agent | AgentList | None: The requested agent or agent list. `None` is returned if `ignore_unknown_ids` is `True` and the external ID is not found.
+ The requested agent or agent list. `None` is returned if `ignore_unknown_ids` is `True` and the external ID is not found.
Examples:
@@ -210,8 +210,8 @@ async def delete(self, external_ids: str | SequenceNotStr[str], ignore_unknown_i
"""`Delete one or more agents. `_
Args:
- external_ids (str | SequenceNotStr[str]): External ID of the agent or a list of external ids.
- ignore_unknown_ids (bool): If `True`, the call will ignore unknown external IDs. Defaults to False.
+ external_ids: External ID of the agent or a list of external ids.
+ ignore_unknown_ids: If `True`, the call will ignore unknown external IDs. Defaults to False.
Examples:
@@ -234,7 +234,7 @@ async def list(self) -> AgentList: # The API does not yet support limit or pagi
"""`List agents. `_
Returns:
- AgentList: The list of agents.
+ The list of agents.
Examples:
@@ -263,14 +263,13 @@ async def chat(
Users can ensure conversation continuity by including the cursor from the previous response in subsequent requests.
Args:
- agent_external_id (str): External ID that uniquely identifies the agent.
- messages (Message | ActionResult | Sequence[Message | ActionResult]): A list of one or many input messages to the agent. Can include regular messages and action results.
- cursor (str | None): The cursor to use for continuation of a conversation. Use this to
- create multi-turn conversations, as the cursor will keep track of the conversation state.
- actions (Sequence[Action] | None): A list of client-side actions that can be called by the agent.
+ agent_external_id: External ID that uniquely identifies the agent.
+ messages: A list of one or many input messages to the agent. Can include regular messages and action results.
+ cursor: The cursor to use for continuation of a conversation. Use this to create multi-turn conversations, as the cursor will keep track of the conversation state.
+ actions: A list of client-side actions that can be called by the agent.
Returns:
- AgentChatResponse: The response from the agent.
+ The response from the agent.
Examples:
diff --git a/cognite/client/_api/ai/tools/documents.py b/cognite/client/_api/ai/tools/documents.py
index 9ac2f27ecf..d4a194e77b 100644
--- a/cognite/client/_api/ai/tools/documents.py
+++ b/cognite/client/_api/ai/tools/documents.py
@@ -25,12 +25,12 @@ async def summarize(
this may be extended in the future.
Args:
- id (int | None): The ID of the document
- external_id (str | None): The external ID of the document
- instance_id (NodeId | None): The instance ID of the document
+ id: The ID of the document
+ external_id: The external ID of the document
+ instance_id: The instance ID of the document
Returns:
- Summary: A summary of the document.
+ A summary of the document.
Examples:
@@ -85,16 +85,16 @@ async def ask_question(
Supports up to 100 documents at a time.
Args:
- question (str): The question.
- id (int | Sequence[int] | None): The ID(s) of the document(s)
- external_id (str | Sequence[str] | None): The external ID(s) of the document(s)
- instance_id (NodeId | Sequence[NodeId] | None): The instance ID(s) of the document(s)
- language (AnswerLanguage | Literal['Chinese', 'Dutch', 'English', 'French', 'German', 'Italian', 'Japanese', 'Korean', 'Latvian', 'Norwegian', 'Portuguese', 'Spanish', 'Swedish']): The desired language of the answer, defaults to English.
- additional_context (str | None): Additional context that you want the LLM to take into account.
- ignore_unknown_ids (bool): Whether to skip documents that do not exist or that are not fully processed, instead of throwing an error. If no valid documents are found, an error will always be raised.
+ question: The question.
+ id: The ID(s) of the document(s)
+ external_id: The external ID(s) of the document(s)
+ instance_id: The instance ID(s) of the document(s)
+ language: The desired language of the answer, defaults to English.
+ additional_context: Additional context that you want the LLM to take into account.
+ ignore_unknown_ids: Whether to skip documents that do not exist or that are not fully processed, instead of throwing an error. If no valid documents are found, an error will always be raised.
Returns:
- Answer: The answer to the question in the form of a list of multiple content objects, each consisting of a chunk of text along with a set of references.
+ The answer to the question in the form of a list of multiple content objects, each consisting of a chunk of text along with a set of references.
Examples:
diff --git a/cognite/client/_api/annotations.py b/cognite/client/_api/annotations.py
index c730e834b5..37ee9218d1 100644
--- a/cognite/client/_api/annotations.py
+++ b/cognite/client/_api/annotations.py
@@ -42,10 +42,10 @@ async def create(
"""`Create annotations `_
Args:
- annotations (Annotation | AnnotationWrite | Sequence[Annotation | AnnotationWrite]): Annotation(s) to create
+ annotations: Annotation(s) to create
Returns:
- Annotation | AnnotationList: Created annotation(s)
+ Created annotation(s)
"""
assert_type(annotations, "annotations", [AnnotationCore, Sequence])
@@ -69,10 +69,10 @@ async def suggest(
"""`Suggest annotations `_
Args:
- annotations (Annotation | AnnotationWrite | Sequence[Annotation] | Sequence[AnnotationWrite]): annotation(s) to suggest. They must have status set to "suggested".
+ annotations: annotation(s) to suggest. They must have status set to "suggested".
Returns:
- Annotation | AnnotationList: suggested annotation(s)
+ suggested annotation(s)
"""
assert_type(annotations, "annotations", [Annotation, AnnotationWrite, Sequence])
# Deal with status fields in both cases: Single item and list of items
@@ -141,11 +141,11 @@ async def update(
"""`Update annotations `_
Args:
- item (Annotation | AnnotationWrite | AnnotationUpdate | Sequence[Annotation | AnnotationWrite | AnnotationUpdate]): Annotation or list of annotations to update (or patch or list of patches to apply)
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Annotation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: Annotation or list of annotations to update (or patch or list of patches to apply)
+ mode: How to update data when a non-update object is given (Annotation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- Annotation | AnnotationList: No description."""
+ No description."""
return await self._update_multiple(
list_cls=AnnotationList, resource_cls=Annotation, update_cls=AnnotationUpdate, items=item, mode=mode
)
@@ -154,7 +154,7 @@ async def delete(self, id: int | Sequence[int]) -> None:
"""`Delete annotations `_
Args:
- id (int | Sequence[int]): ID or list of IDs to be deleted
+ id: ID or list of IDs to be deleted
"""
await self._delete_multiple(identifiers=IdentifierSequence.load(ids=id), wrap_ids=True)
@@ -162,10 +162,10 @@ async def retrieve_multiple(self, ids: Sequence[int]) -> AnnotationList:
"""`Retrieve annotations by IDs `_`
Args:
- ids (Sequence[int]): list of IDs to be retrieved
+ ids: list of IDs to be retrieved
Returns:
- AnnotationList: list of annotations
+ list of annotations
"""
identifiers = IdentifierSequence.load(ids=ids, external_ids=None)
return await self._retrieve_multiple(list_cls=AnnotationList, resource_cls=Annotation, identifiers=identifiers)
@@ -174,10 +174,10 @@ async def retrieve(self, id: int) -> Annotation | None:
"""`Retrieve an annotation by id `_
Args:
- id (int): id of the annotation to be retrieved
+ id: id of the annotation to be retrieved
Returns:
- Annotation | None: annotation requested
+ annotation requested
"""
identifiers = IdentifierSequence.load(ids=id, external_ids=None).as_singleton()
return await self._retrieve_multiple(list_cls=AnnotationList, resource_cls=Annotation, identifiers=identifiers)
@@ -188,11 +188,11 @@ async def reverse_lookup(
"""Reverse lookup annotated resources based on having annotations matching the filter.
Args:
- filter (AnnotationReverseLookupFilter): Filter to apply
- limit (int | None): Maximum number of results to return. Defaults to None (all).
+ filter: Filter to apply
+ limit: Maximum number of results to return. Defaults to None (all).
Returns:
- ResourceReferenceList: List of resource references
+ List of resource references
Examples:
@@ -225,11 +225,11 @@ async def list(self, filter: AnnotationFilter | dict, limit: int | None = DEFAUL
Passing a filter with both 'annotated_resource_type' and 'annotated_resource_ids' is always required.
Args:
- filter (AnnotationFilter | dict): Return annotations with parameter values that match what is specified.
- limit (int | None): Maximum number of annotations to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ filter: Return annotations with parameter values that match what is specified.
+ limit: Maximum number of annotations to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- AnnotationList: list of annotations
+ list of annotations
Example:
diff --git a/cognite/client/_api/assets.py b/cognite/client/_api/assets.py
index 09937332e2..ceb515e4ea 100644
--- a/cognite/client/_api/assets.py
+++ b/cognite/client/_api/assets.py
@@ -149,29 +149,29 @@ async def __call__(
Fetches assets as they are iterated over, so you keep a limited number of assets in memory.
Args:
- chunk_size (int | None): Number of assets to return in each chunk. Defaults to yielding one asset a time.
- name (str | None): Name of asset. Often referred to as tag.
- parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets.
- parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets.
- asset_subtree_ids (int | Sequence[int] | None): Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value
- data_set_ids (int | Sequence[int] | None): Return only assets in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only assets in the specified data set(s) with this external id / these external ids.
- labels (LabelFilter | None): Return only the assets matching the specified label.
- geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation.
- source (str | None): The source of this asset
- created_time (TimestampRange | dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- last_updated_time (TimestampRange | dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- root (bool | None): filtered assets are root assets or not
- external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
- aggregated_properties (Sequence[AggregateAssetProperty] | None): Set of aggregated properties to include. Options are childCount, path, depth.
- limit (int | None): Maximum number of assets to return. Defaults to return all items.
- advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
- sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+ chunk_size: Number of assets to return in each chunk. Defaults to yielding one asset a time.
+ name: Name of asset. Often referred to as tag.
+ parent_ids: Return only the direct descendants of the specified assets.
+ parent_external_ids: Return only the direct descendants of the specified assets.
+ asset_subtree_ids: Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids: Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ metadata: Custom, application specific metadata. String key -> String value
+ data_set_ids: Return only assets in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only assets in the specified data set(s) with this external id / these external ids.
+ labels: Return only the assets matching the specified label.
+ geo_location: Only include files matching the specified geographic relation.
+ source: The source of this asset
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ root: filtered assets are root assets or not
+ external_id_prefix: Filter by this (case-sensitive) prefix for the external ID.
+ aggregated_properties: Set of aggregated properties to include. Options are childCount, path, depth.
+ limit: Maximum number of assets to return. Defaults to return all items.
+ advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
+ sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
Yields:
- Asset | AssetList: yields Asset one by one if chunk_size is not specified, else AssetList objects.
+ yields Asset one by one if chunk_size is not specified, else AssetList objects.
""" # noqa: DOC404
agg_props = self._process_aggregated_props(aggregated_properties)
asset_subtree_ids_processed = process_asset_subtree_ids(asset_subtree_ids, asset_subtree_external_ids)
@@ -213,11 +213,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None)
"""`Retrieve a single asset by id. `_
Args:
- id (int | None): ID
- external_id (str | None): External ID
+ id: ID
+ external_id: External ID
Returns:
- Asset | None: Requested asset or None if it does not exist.
+ Requested asset or None if it does not exist.
Examples:
@@ -244,12 +244,12 @@ async def retrieve_multiple(
"""`Retrieve multiple assets by id. `_
Args:
- ids (Sequence[int] | None): IDs
- external_ids (SequenceNotStr[str] | None): External IDs
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ ids: IDs
+ external_ids: External IDs
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Returns:
- AssetList: The requested assets.
+ The requested assets.
Examples:
@@ -278,12 +278,12 @@ async def aggregate_count(
"""`Count of assets matching the specified filters. `_
Args:
- property (AssetPropertyLike | None): If specified, get an approximate number of asset with a specific property (property is not null) and matching the filters.
- advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down the assets to count.
- filter (AssetFilter | dict[str, Any] | None): The filter to narrow down the assets to count (strict matching).
+        property: If specified, get an approximate number of assets with a specific property (property is not null) and matching the filters.
+ advanced_filter: The advanced filter to narrow down the assets to count.
+ filter: The filter to narrow down the assets to count (strict matching).
Returns:
- int: The number of assets matching the specified filters.
+ The number of assets matching the specified filters.
Examples:
@@ -320,12 +320,12 @@ async def aggregate_cardinality_values(
"""`Find approximate property count for assets. `_
Args:
- property (AssetPropertyLike): The property to count the cardinality of.
- advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching).
+ property: The property to count the cardinality of.
+ advanced_filter: The advanced filter to narrow down assets.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down assets (strict matching).
Returns:
- int: The number of properties matching the specified filters and search.
+ The number of properties matching the specified filters and search.
Examples:
@@ -365,13 +365,12 @@ async def aggregate_cardinality_properties(
"""`Find approximate paths count for assets. `_
Args:
- path (AssetPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"].
- It means to aggregate only metadata properties (aka keys).
- advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching).
+ path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+ advanced_filter: The advanced filter to narrow down assets.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down assets (strict matching).
Returns:
- int: The number of properties matching the specified filters.
+ The number of properties matching the specified filters.
Examples:
@@ -405,13 +404,13 @@ async def aggregate_unique_values(
In the case of text fields, the values are aggregated in a case-insensitive manner.
Args:
- property (AssetPropertyLike): The property to group by.
- advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching).
+ property: The property to group by.
+ advanced_filter: The advanced filter to narrow down assets.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down assets (strict matching).
Returns:
- UniqueResultList: List of unique values of assets matching the specified filters and search.
+ List of unique values of assets matching the specified filters and search.
Examples:
@@ -468,14 +467,13 @@ async def aggregate_unique_properties(
In the case of text fields, the values are aggregated in a case-insensitive manner.
Args:
- path (AssetPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"].
- It means to aggregate only metadata properties (aka keys).
- advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching).
+ path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+ advanced_filter: The advanced filter to narrow down assets.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down assets (strict matching).
Returns:
- UniqueResultList: List of unique values of assets matching the specified filters and search.
+ List of unique values of assets matching the specified filters and search.
Examples:
@@ -509,10 +507,10 @@ async def create(self, asset: Asset | AssetWrite | Sequence[Asset] | Sequence[As
When specifying parent-child relation between assets using `parentExternalId` the link will be resvoled into an internal ID and stored as `parentId`.
Args:
- asset (Asset | AssetWrite | Sequence[Asset] | Sequence[AssetWrite]): Asset or list of assets to create.
+ asset: Asset or list of assets to create.
Returns:
- Asset | AssetList: Created asset(s)
+ Created asset(s)
Examples:
@@ -552,12 +550,12 @@ async def create_hierarchy(
assets, so you may pass zero, one or many (same goes for the non-root assets).
Args:
- assets (Sequence[AssetWrite] | AssetHierarchy): List of assets to create or an instance of AssetHierarchy.
- upsert (bool): If used, already existing assets will be updated instead of an exception being raised. You may control how updates are applied with the 'upsert_mode' argument.
- upsert_mode (Literal['patch', 'replace']): Only applicable with upsert. Pass 'patch' to only update fields with non-null values (default), or 'replace' to do full updates (unset fields become null or empty).
+ assets: List of assets to create or an instance of AssetHierarchy.
+ upsert: If used, already existing assets will be updated instead of an exception being raised. You may control how updates are applied with the 'upsert_mode' argument.
+ upsert_mode: Only applicable with upsert. Pass 'patch' to only update fields with non-null values (default), or 'replace' to do full updates (unset fields become null or empty).
Returns:
- AssetList: Created (and possibly updated) asset hierarchy
+ Created (and possibly updated) asset hierarchy
Prior to insertion, this function will run validation on the given assets and raise an error if any of
the following issues are found:
@@ -680,10 +678,10 @@ async def delete(
"""`Delete one or more assets `_
Args:
- id (int | Sequence[int] | None): Id or list of ids
- external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
- recursive (bool): Recursively delete whole asset subtrees under given ids. Defaults to False.
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ id: Id or list of ids
+ external_id: External ID or list of external ids
+ recursive: Recursively delete whole asset subtrees under given ids. Defaults to False.
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Examples:
@@ -723,10 +721,10 @@ async def update(
Labels can be added, removed or replaced (set). Note that set operation deletes all the existing labels and adds the new specified labels.
Args:
- item (Asset | AssetWrite | AssetUpdate | Sequence[Asset | AssetWrite | AssetUpdate]): Asset(s) to update
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Asset or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: Asset(s) to update
+ mode: How to update data when a non-update object is given (Asset or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- Asset | AssetList: Updated asset(s)
+ Updated asset(s)
Examples:
Perform a partial update on an asset, updating the description and adding a new field to metadata:
@@ -791,11 +789,11 @@ async def upsert(
For more details, see :ref:`appendix-upsert`.
Args:
- item (Asset | AssetWrite | Sequence[Asset | AssetWrite]): Asset or list of assets to upsert.
- mode (Literal['patch', 'replace']): Whether to patch or replace in the case the assets are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
+ item: Asset or list of assets to upsert.
+ mode: Whether to patch or replace in the case the assets are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
Returns:
- Asset | AssetList: The upserted asset(s).
+ The upserted asset(s).
Examples:
@@ -834,14 +832,14 @@ async def search(
Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required.
Args:
- name (str | None): Fuzzy match on name.
- description (str | None): Fuzzy match on description.
- query (str | None): Whitespace-separated terms to search for in assets. Does a best-effort fuzzy search in relevant fields (currently name and description) for variations of any of the search terms, and orders results by relevance.
- filter (AssetFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields.
- limit (int): Maximum number of results to return.
+ name: Fuzzy match on name.
+ description: Fuzzy match on description.
+ query: Whitespace-separated terms to search for in assets. Does a best-effort fuzzy search in relevant fields (currently name and description) for variations of any of the search terms, and orders results by relevance.
+ filter: Filter to apply. Performs exact match on these fields.
+ limit: Maximum number of results to return.
Returns:
- AssetList: List of requested assets
+ List of requested assets
Examples:
@@ -882,12 +880,12 @@ async def retrieve_subtree(
"""Retrieve the subtree for this asset up to a specified depth.
Args:
- id (int | None): Id of the root asset in the subtree.
- external_id (str | None): External id of the root asset in the subtree.
- depth (int | None): Retrieve assets up to this depth below the root asset in the subtree. Omit to get the entire subtree.
+ id: Id of the root asset in the subtree.
+ external_id: External id of the root asset in the subtree.
+ depth: Retrieve assets up to this depth below the root asset in the subtree. Omit to get the entire subtree.
Returns:
- AssetList: The requested assets or empty AssetList if asset does not exist.
+ The requested assets or empty AssetList if asset does not exist.
"""
asset = await self.retrieve(id=id, external_id=external_id)
if asset is None:
@@ -941,29 +939,29 @@ async def list(
"""`List assets `_
Args:
- name (str | None): Name of asset. Often referred to as tag.
- parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets.
- parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets.
- asset_subtree_ids (int | Sequence[int] | None): Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- data_set_ids (int | Sequence[int] | None): Return only assets in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only assets in the specified data set(s) with this external id / these external ids.
- labels (LabelFilter | None): Return only the assets matching the specified label filter.
- geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation.
- metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value.
- source (str | None): The source of this asset.
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- root (bool | None): filtered assets are root assets or not.
- external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
- aggregated_properties (Sequence[AggregateAssetProperty] | None): Set of aggregated properties to include. Options are childCount, path, depth.
- partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
- limit (int | None): Maximum number of assets to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
- advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage.
- sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+ name: Name of asset. Often referred to as tag.
+ parent_ids: Return only the direct descendants of the specified assets.
+ parent_external_ids: Return only the direct descendants of the specified assets.
+ asset_subtree_ids: Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids: Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids: Return only assets in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only assets in the specified data set(s) with this external id / these external ids.
+ labels: Return only the assets matching the specified label filter.
+ geo_location: Only include files matching the specified geographic relation.
+ metadata: Custom, application specific metadata. String key -> String value.
+ source: The source of this asset.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ root: Whether the filtered assets are root assets or not.
+ external_id_prefix: Filter by this (case-sensitive) prefix for the external ID.
+ aggregated_properties: Set of aggregated properties to include. Options are childCount, path, depth.
+ partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
+ limit: Maximum number of assets to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage.
+ sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
Returns:
- AssetList: List of requested assets
+ List of requested assets
.. note::
When using `partitions`, there are few considerations to keep in mind:
diff --git a/cognite/client/_api/data_modeling/containers.py b/cognite/client/_api/data_modeling/containers.py
index a5329ef52a..4a2a5c76b8 100644
--- a/cognite/client/_api/data_modeling/containers.py
+++ b/cognite/client/_api/data_modeling/containers.py
@@ -72,13 +72,13 @@ async def __call__(
Fetches containers as they are iterated over, so you keep a limited number of containers in memory.
Args:
- chunk_size (int | None): Number of containers to return in each chunk. Defaults to yielding one container a time.
- space (str | None): The space to query.
- include_global (bool): Whether the global containers should be returned.
- limit (int | None): Maximum number of containers to return. Defaults to returning all items.
+ chunk_size: Number of containers to return in each chunk. Defaults to yielding one container at a time.
+ space: The space to query.
+ include_global: Whether the global containers should be returned.
+ limit: Maximum number of containers to return. Defaults to returning all items.
Yields:
- Container | ContainerList: yields Container one by one if chunk_size is not specified, else ContainerList objects.
+ yields Container one by one if chunk_size is not specified, else ContainerList objects.
""" # noqa: DOC404
flt = _ContainerFilter(space, include_global)
async for item in self._list_generator(
@@ -104,10 +104,10 @@ async def retrieve(
"""`Retrieve one or more container by id(s). `_
Args:
- ids (ContainerIdentifier | Sequence[ContainerIdentifier]): Identifier for container(s).
+ ids: Identifier for container(s).
Returns:
- Container | ContainerList | None: Requested container or None if it does not exist.
+ Requested container or None if it does not exist.
Examples:
@@ -134,9 +134,9 @@ async def delete(self, ids: ContainerIdentifier | Sequence[ContainerIdentifier])
"""`Delete one or more containers `_
Args:
- ids (ContainerIdentifier | Sequence[ContainerIdentifier]): The container identifier(s).
+ ids: The container identifier(s).
Returns:
- list[ContainerId]: The container(s) which has been deleted. Empty list if nothing was deleted.
+ The container(s) which have been deleted. Empty list if nothing was deleted.
Examples:
Delete containers by id:
@@ -161,9 +161,9 @@ async def delete_constraints(self, ids: Sequence[ConstraintIdentifier]) -> list[
"""`Delete one or more constraints `_
Args:
- ids (Sequence[ConstraintIdentifier]): The constraint identifier(s).
+ ids: The constraint identifier(s).
Returns:
- list[ConstraintIdentifier]: The constraints(s) which have been deleted.
+ The constraint(s) which have been deleted.
Examples:
Delete constraints by id:
@@ -181,9 +181,9 @@ async def delete_indexes(self, ids: Sequence[IndexIdentifier]) -> list[IndexIden
"""`Delete one or more indexes `_
Args:
- ids (Sequence[IndexIdentifier]): The index identifier(s).
+ ids: The index identifier(s).
Returns:
- list[IndexIdentifier]: The indexes(s) which has been deleted.
+ The index(es) which have been deleted.
Examples:
Delete indexes by id:
@@ -229,12 +229,12 @@ async def list(
"""`List containers `_
Args:
- space (str | None): The space to query
- limit (int | None): Maximum number of containers to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
- include_global (bool): Whether the global containers should be returned.
+ space: The space to query
+ limit: Maximum number of containers to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
+ include_global: Whether the global containers should be returned.
Returns:
- ContainerList: List of requested containers
+ List of requested containers
Examples:
@@ -275,10 +275,10 @@ async def apply(self, container: ContainerApply | Sequence[ContainerApply]) -> C
"""`Add or update (upsert) containers. `_
Args:
- container (ContainerApply | Sequence[ContainerApply]): Container(s) to create or update.
+ container: Container(s) to create or update.
Returns:
- Container | ContainerList: Created container(s)
+ Created container(s)
Examples:
diff --git a/cognite/client/_api/data_modeling/data_models.py b/cognite/client/_api/data_modeling/data_models.py
index 28d63b7105..0f715759be 100644
--- a/cognite/client/_api/data_modeling/data_models.py
+++ b/cognite/client/_api/data_modeling/data_models.py
@@ -73,15 +73,15 @@ async def __call__(
Fetches data model as they are iterated over, so you keep a limited number of data model in memory.
Args:
- chunk_size (int | None): Number of data model to return in each chunk. Defaults to yielding one data_model a time.
- limit (int | None): Maximum number of data model to return. Defaults to returning all items.
- space (str | None): The space to query.
- inline_views (bool): Whether to expand the referenced views inline in the returned result.
- all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
- include_global (bool): Whether to include global views.
+ chunk_size: Number of data models to return in each chunk. Defaults to yielding one data model at a time.
+ limit: Maximum number of data models to return. Defaults to returning all items.
+ space: The space to query.
+ inline_views: Whether to expand the referenced views inline in the returned result.
+ all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
+ include_global: Whether to include global views.
Yields:
- DataModel | DataModelList: yields DataModel one by one if chunk_size is not specified, else DataModelList objects.
+ yields DataModel one by one if chunk_size is not specified, else DataModelList objects.
""" # noqa: DOC404
filter = DataModelFilter(space, inline_views, all_versions, include_global)
@@ -112,11 +112,11 @@ async def retrieve(
"""`Retrieve data_model(s) by id(s). `_
Args:
- ids (DataModelIdentifier | Sequence[DataModelIdentifier]): Data Model identifier(s).
- inline_views (bool): Whether to expand the referenced views inline in the returned result.
+ ids: Data Model identifier(s).
+ inline_views: Whether to expand the referenced views inline in the returned result.
Returns:
- DataModelList[ViewId] | DataModelList[View]: Requested data model(s) or empty if none exist.
+ Requested data model(s) or empty if none exist.
Examples:
@@ -138,9 +138,9 @@ async def delete(self, ids: DataModelIdentifier | Sequence[DataModelIdentifier])
"""`Delete one or more data model `_
Args:
- ids (DataModelIdentifier | Sequence[DataModelIdentifier]): Data Model identifier(s).
+ ids: Data Model identifier(s).
Returns:
- list[DataModelId]: The data_model(s) which has been deleted. None if nothing was deleted.
+ The data model(s) which have been deleted. Empty list if nothing was deleted.
Examples:
Delete data model by id:
@@ -192,14 +192,14 @@ async def list(
"""`List data models `_
Args:
- inline_views (bool): Whether to expand the referenced views inline in the returned result.
- limit (int | None): Maximum number of data model to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
- space (str | None): The space to query.
- all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
- include_global (bool): Whether to include global data models.
+ inline_views: Whether to expand the referenced views inline in the returned result.
+ limit: Maximum number of data model to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
+ space: The space to query.
+ all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
+ include_global: Whether to include global data models.
Returns:
- DataModelList[View] | DataModelList[ViewId]: List of requested data models
+ List of requested data models
Examples:
@@ -241,10 +241,10 @@ async def apply(self, data_model: DataModelApply | Sequence[DataModelApply]) ->
"""`Create or update one or more data model. `_
Args:
- data_model (DataModelApply | Sequence[DataModelApply]): Data model(s) to create or update (upsert).
+ data_model: Data model(s) to create or update (upsert).
Returns:
- DataModel | DataModelList: Created data model(s)
+ Created data model(s)
Examples:
diff --git a/cognite/client/_api/data_modeling/graphql.py b/cognite/client/_api/data_modeling/graphql.py
index 58d0943c36..9c5e32d1c2 100644
--- a/cognite/client/_api/data_modeling/graphql.py
+++ b/cognite/client/_api/data_modeling/graphql.py
@@ -39,10 +39,10 @@ async def _unsafely_wipe_and_regenerate_dml(self, id: DataModelIdentifier) -> st
This removes all comments from the DML.
Args:
- id (DataModelIdentifier): The data model to apply DML to.
+ id: The data model to apply DML to.
Returns:
- str: The new DML
+ The new DML
"""
graphql_body = """
query WipeAndRegenerateDml($space: String!, $externalId: String!, $version: String!) {
@@ -82,14 +82,14 @@ async def apply_dml(
"""Apply the DML for a given data model.
Args:
- id (DataModelIdentifier): The data model to apply DML to.
- dml (str): The DML to apply.
- name (str | None): The name of the data model.
- description (str | None): The description of the data model.
- previous_version (str | None): The previous version of the data model. Specify to reuse view versions from previous data model version.
+ id: The data model to apply DML to.
+ dml: The DML to apply.
+ name: The name of the data model.
+ description: The description of the data model.
+ previous_version: The previous version of the data model. Specify to reuse view versions from previous data model version.
Returns:
- DMLApplyResult: The id of the updated data model.
+ The id of the updated data model.
Examples:
@@ -163,12 +163,12 @@ async def query(
"""Execute a GraphQl query against a given data model.
Args:
- id (DataModelIdentifier): The data model to query.
- query (str): The query to issue.
- variables (dict[str, Any] | None): An optional dict of variables to pass to the query.
+ id: The data model to query.
+ query: The query to issue.
+ variables: An optional dict of variables to pass to the query.
Returns:
- dict[str, Any]: The query result
+ The query result
Examples:
diff --git a/cognite/client/_api/data_modeling/instances.py b/cognite/client/_api/data_modeling/instances.py
index af949c4665..c6b652cfd5 100644
--- a/cognite/client/_api/data_modeling/instances.py
+++ b/cognite/client/_api/data_modeling/instances.py
@@ -257,18 +257,18 @@ async def __call__(
Fetches instances as they are iterated over, so you keep a limited number of instances in memory.
Args:
- chunk_size (int | None): Number of data_models to return in each chunk. Defaults to yielding one instance at a time.
- instance_type (Literal['node', 'edge']): Whether to query for nodes or edges.
- limit (int | None): Maximum number of instances to return. Defaults to returning all items.
- include_typing (bool): Whether to return property type information as part of the result.
- sources (Source | Sequence[Source] | None): Views to retrieve properties from.
- space (str | SequenceNotStr[str] | None): Only return instances in the given space (or list of spaces).
- sort (list[InstanceSort | dict] | InstanceSort | dict | None): Sort(s) to apply to the returned instances. For nontrivial amounts of data, you need to have a backing, cursorable index.
- filter (Filter | dict[str, Any] | None): Advanced filtering of instances.
- debug (DebugParameters | None): Debug settings for profiling and troubleshooting.
+ chunk_size: Number of instances to return in each chunk. Defaults to yielding one instance at a time.
+ instance_type: Whether to query for nodes or edges.
+ limit: Maximum number of instances to return. Defaults to returning all items.
+ include_typing: Whether to return property type information as part of the result.
+ sources: Views to retrieve properties from.
+ space: Only return instances in the given space (or list of spaces).
+ sort: Sort(s) to apply to the returned instances. For nontrivial amounts of data, you need to have a backing, cursorable index.
+ filter: Advanced filtering of instances.
+ debug: Debug settings for profiling and troubleshooting.
Yields:
- Edge | EdgeList | Node | NodeList: yields Instance one by one if chunk_size is not specified, else NodeList/EdgeList objects.
+ yields Instance one by one if chunk_size is not specified, else NodeList/EdgeList objects.
"""
self._validate_filter(filter)
filter = self._merge_space_into_filter(instance_type, space, filter)
@@ -369,13 +369,13 @@ async def retrieve_edges(
Args:
- edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]]): Edge id(s) to retrieve.
- edge_cls (type[T_Edge]): The custom edge class to use, the retrieved edges will automatically be serialized into this class.
- sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom edge class.
- include_typing (bool): Whether to include typing information
+ edges: Edge id(s) to retrieve.
+ edge_cls: The custom edge class to use, the retrieved edges will automatically be serialized into this class.
+ sources: Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom edge class.
+ include_typing: Whether to include typing information
Returns:
- EdgeList[T_Edge] | T_Edge | Edge | None: The requested edges.
+ The requested edges.
Examples:
@@ -473,13 +473,13 @@ async def retrieve_nodes(
built-in Node class.
Args:
- nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]]): Node id(s) to retrieve.
- node_cls (type[T_Node]): The custom node class to use, the retrieved nodes will automatically be serialized to this class.
- sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom node class.
- include_typing (bool): Whether to include typing information
+ nodes: Node id(s) to retrieve.
+ node_cls: The custom node class to use, the retrieved nodes will automatically be serialized to this class.
+ sources: Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom node class.
+ include_typing: Whether to include typing information
Returns:
- NodeList[T_Node] | T_Node | Node | None: The requested edges.
+ The requested nodes.
Examples:
@@ -544,13 +544,13 @@ async def retrieve(
"""`Retrieve one or more instance by id(s). `_
Args:
- nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node ids
- edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge ids
- sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views.
- include_typing (bool): Whether to return property type information as part of the result.
+ nodes: Node ids
+ edges: Edge ids
+ sources: Retrieve properties from the listed - by reference - views.
+ include_typing: Whether to return property type information as part of the result.
Returns:
- InstancesResult[Node, Edge]: Requested instances.
+ Requested instances.
Examples:
@@ -692,11 +692,11 @@ async def delete(
"""`Delete one or more instances `_
Args:
- nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node ids
- edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge ids
+ nodes: Node ids
+ edges: Edge ids
Returns:
- InstancesDeleteResult: The instance ID(s) that was deleted. Empty list if nothing was deleted.
+ The instance ID(s) that was deleted. Empty list if nothing was deleted.
Examples:
@@ -745,13 +745,13 @@ async def inspect(
This method will return the involved views and containers for the given nodes and edges.
Args:
- nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node IDs.
- edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge IDs.
- involved_views (InvolvedViews | None): Whether to include involved views. Must pass at least one of involved_views or involved_containers.
- involved_containers (InvolvedContainers | None): Whether to include involved containers. Must pass at least one of involved_views or involved_containers.
+ nodes: Node IDs.
+ edges: Edge IDs.
+ involved_views: Whether to include involved views. Must pass at least one of involved_views or involved_containers.
+ involved_containers: Whether to include involved containers. Must pass at least one of involved_views or involved_containers.
Returns:
- InstanceInspectResults: List of instance inspection results.
+ List of instance inspection results.
Examples:
@@ -817,13 +817,13 @@ async def subscribe(
see :ref:`this example of syncing instances to a local SQLite database `.
Args:
- query (QuerySync): The query to subscribe to.
- callback (Callable[[QueryResult], None | Awaitable[None]]): The callback function to call when the result set changes. Can be a regular or async function.
- poll_delay_seconds (float): The time to wait between polls when no data is present. Defaults to 30 seconds.
- throttle_seconds (float): The time to wait between polls despite data being present.
+ query: The query to subscribe to.
+ callback: The callback function to call when the result set changes. Can be a regular or async function.
+ poll_delay_seconds: The time to wait between polls when no data is present. Defaults to 30 seconds.
+ throttle_seconds: The time to wait between polls despite data being present.
Returns:
- SubscriptionContext: An object that can be used to inspect and cancel the subscription.
+ An object that can be used to inspect and cancel the subscription.
Examples:
@@ -955,16 +955,16 @@ async def apply(
"""`Add or update (upsert) instances. `_
Args:
- nodes (NodeApply | Sequence[NodeApply] | None): Nodes to apply
- edges (EdgeApply | Sequence[EdgeApply] | None): Edges to apply
- auto_create_start_nodes (bool): Whether to create missing start nodes for edges when ingesting. By default, the start node of an edge must exist before it can be ingested.
- auto_create_end_nodes (bool): Whether to create missing end nodes for edges when ingesting. By default, the end node of an edge must exist before it can be ingested.
- auto_create_direct_relations (bool): Whether to create missing direct relation targets when ingesting.
- skip_on_version_conflict (bool): If existingVersion is specified on any of the nodes/edges in the input, the default behaviour is that the entire ingestion will fail when version conflicts occur. If skipOnVersionConflict is set to true, items with version conflicts will be skipped instead. If no version is specified for nodes/edges, it will do the writing directly.
- replace (bool): How do we behave when a property value exists? Do we replace all matching and existing values with the supplied values (true)? Or should we merge in new values for properties together with the existing values (false)? Note: This setting applies for all nodes or edges specified in the ingestion call.
+ nodes: Nodes to apply
+ edges: Edges to apply
+ auto_create_start_nodes: Whether to create missing start nodes for edges when ingesting. By default, the start node of an edge must exist before it can be ingested.
+ auto_create_end_nodes: Whether to create missing end nodes for edges when ingesting. By default, the end node of an edge must exist before it can be ingested.
+ auto_create_direct_relations: Whether to create missing direct relation targets when ingesting.
+ skip_on_version_conflict: If existingVersion is specified on any of the nodes/edges in the input, the default behaviour is that the entire ingestion will fail when version conflicts occur. If skipOnVersionConflict is set to true, items with version conflicts will be skipped instead. If no version is specified for nodes/edges, it will do the writing directly.
+ replace: How do we behave when a property value exists? Do we replace all matching and existing values with the supplied values (true)? Or should we merge in new values for properties together with the existing values (false)? Note: This setting applies for all nodes or edges specified in the ingestion call.
Returns:
- InstancesApplyResult: Created instance(s)
+ Created instance(s)
Examples:
@@ -1164,24 +1164,20 @@ async def search(
"""`Search instances `_
Args:
- view (ViewId): View to search in.
- query (str | None): Query string that will be parsed and used for search.
- instance_type (Literal['node', 'edge'] | type[T_Node] | type[T_Edge]): Whether to search for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example.
- properties (list[str] | None): Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view.
- target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried.
- space (str | SequenceNotStr[str] | None): Restrict instance search to the given space (or list of spaces).
- filter (Filter | dict[str, Any] | None): Advanced filtering of instances.
- include_typing (bool): Whether to include typing information.
- limit (int | None): Maximum number of instances to return. Defaults to 25. Will return the maximum number
- of results (1000) if set to None, -1, or math.inf.
- sort (Sequence[InstanceSort | dict] | InstanceSort | dict | None): How you want the listed instances information ordered.
- operator (Literal['AND', 'OR']): Controls how multiple search terms are combined when matching documents.
- AND (default): A document matches only if it contains all of the query terms across the searchable fields.
- This typically returns fewer results but with higher relevance. OR: A document matches if it contains any
- of the query terms in the searchable fields. This typically returns more results but with lower precision.
+ view: View to search in.
+ query: Query string that will be parsed and used for search.
+ instance_type: Whether to search for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example.
+ properties: Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view.
+ target_units: Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried.
+ space: Restrict instance search to the given space (or list of spaces).
+ filter: Advanced filtering of instances.
+ include_typing: Whether to include typing information.
+ limit: Maximum number of instances to return. Defaults to 25. Will return the maximum number of results (1000) if set to None, -1, or math.inf.
+ sort: How you want the listed instances information ordered.
+ operator: Controls how multiple search terms are combined when matching documents. AND (default): A document matches only if it contains all of the query terms across the searchable fields. This typically returns fewer results but with higher relevance. OR: A document matches if it contains any of the query terms in the searchable fields. This typically returns more results but with lower precision.
Returns:
- NodeList[T_Node] | EdgeList[T_Edge]: Search result with matching nodes or edges.
+ Search result with matching nodes or edges.
Examples:
@@ -1330,20 +1326,19 @@ async def aggregate(
"""`Aggregate data across nodes/edges `_
Args:
- view (ViewId): View to aggregate over.
- aggregates (MetricAggregation | dict | Sequence[MetricAggregation | dict]): The properties to aggregate over.
- group_by (str | SequenceNotStr[str] | None): The selection of fields to group the results by when doing aggregations. You can specify up to 5 items to group by.
- instance_type (Literal['node', 'edge']): The type of instance.
- query (str | None): Optional query string. The API will parse the query string, and use it to match the text properties on elements to use for the aggregate(s).
- properties (str | SequenceNotStr[str] | None): Optional list of properties you want to apply the query to. If you do not list any properties, you search through text fields by default.
- target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried.
- space (str | SequenceNotStr[str] | None): Restrict instance aggregate query to the given space (or list of spaces).
- filter (Filter | dict[str, Any] | None): Advanced filtering of instances.
- limit (int | None): Maximum number of instances to return. Defaults to 25. Will return the maximum number
- of results (1000) if set to None, -1, or math.inf.
+ view: View to aggregate over.
+ aggregates: The properties to aggregate over.
+ group_by: The selection of fields to group the results by when doing aggregations. You can specify up to 5 items to group by.
+ instance_type: The type of instance.
+ query: Optional query string. The API will parse the query string, and use it to match the text properties on elements to use for the aggregate(s).
+ properties: Optional list of properties you want to apply the query to. If you do not list any properties, you search through text fields by default.
+ target_units: Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried.
+ space: Restrict instance aggregate query to the given space (or list of spaces).
+ filter: Advanced filtering of instances.
+ limit: Maximum number of instances to return. Defaults to 25. Will return the maximum number of results (1000) if set to None, -1, or math.inf.
Returns:
- AggregatedNumberedValue | list[AggregatedNumberedValue] | InstanceAggregationResultList: Node or edge aggregation results.
+ Node or edge aggregation results.
Examples:
@@ -1441,18 +1436,18 @@ async def histogram(
"""`Produces histograms for nodes/edges `_
Args:
- view (ViewId): View to to aggregate over.
- histograms (Histogram | Sequence[Histogram]): The properties to aggregate over.
- instance_type (Literal['node', 'edge']): Whether to search for nodes or edges.
- query (str | None): Query string that will be parsed and used for search.
- properties (SequenceNotStr[str] | None): Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view.
- target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried.
- space (str | SequenceNotStr[str] | None): Restrict histogram query to instances in the given space (or list of spaces).
- filter (Filter | dict[str, Any] | None): Advanced filtering of instances.
- limit (int): Maximum number of instances to return. Defaults to 25.
+ view: View to aggregate over.
+ histograms: The properties to aggregate over.
+ instance_type: Whether to search for nodes or edges.
+ query: Query string that will be parsed and used for search.
+ properties: Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view.
+ target_units: Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried.
+ space: Restrict histogram query to instances in the given space (or list of spaces).
+ filter: Advanced filtering of instances.
+ limit: Maximum number of instances to return. Defaults to 25.
Returns:
- HistogramValue | list[HistogramValue]: Node or edge aggregation results.
+ Node or edge aggregation results.
Examples:
@@ -1513,12 +1508,12 @@ async def query(
recursive edge traversal, chaining of result sets, and granular property selection.
Args:
- query (Query): Query.
- include_typing (bool): Should we return property type information as part of the result?
- debug (DebugParameters | None): Debug settings for profiling and troubleshooting.
+ query: Query.
+ include_typing: Should we return property type information as part of the result?
+ debug: Debug settings for profiling and troubleshooting.
Returns:
- QueryResult: The resulting nodes and/or edges from the query.
+ The resulting nodes and/or edges from the query.
Examples:
@@ -1587,12 +1582,12 @@ async def sync(
Subscribe to changes for nodes and edges in a project, matching a supplied filter.
Args:
- query (QuerySync): Query.
- include_typing (bool): Should we return property type information as part of the result?
- debug (DebugParameters | None): Debug settings for profiling and troubleshooting.
+ query: Query.
+ include_typing: Should we return property type information as part of the result?
+ debug: Debug settings for profiling and troubleshooting.
Returns:
- QueryResult: The resulting nodes and/or edges from the query.
+ The resulting nodes and/or edges from the query.
Examples:
@@ -1734,17 +1729,17 @@ async def list(
"""`List instances `_
Args:
- instance_type (Literal['node', 'edge'] | type[T_Node] | type[T_Edge]): Whether to query for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example.
- include_typing (bool): Whether to return property type information as part of the result.
- sources (Source | Sequence[Source] | None): Views to retrieve properties from.
- space (str | SequenceNotStr[str] | None): Only return instances in the given space (or list of spaces).
- limit (int | None): Maximum number of instances to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
- sort (Sequence[InstanceSort | dict] | InstanceSort | dict | None): How you want the listed instances information ordered.
- filter (Filter | dict[str, Any] | None): Advanced filtering of instances.
- debug (DebugParameters | None): Debug settings for profiling and troubleshooting.
+ instance_type: Whether to query for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example.
+ include_typing: Whether to return property type information as part of the result.
+ sources: Views to retrieve properties from.
+ space: Only return instances in the given space (or list of spaces).
+ limit: Maximum number of instances to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ sort: How you want the listed instances information ordered.
+ filter: Advanced filtering of instances.
+ debug: Debug settings for profiling and troubleshooting.
Returns:
- NodeList[T_Node] | EdgeList[T_Edge]: List of requested instances
+ List of requested instances
Examples:
diff --git a/cognite/client/_api/data_modeling/space_statistics.py b/cognite/client/_api/data_modeling/space_statistics.py
index fd50a31b14..3d7ccccb3b 100644
--- a/cognite/client/_api/data_modeling/space_statistics.py
+++ b/cognite/client/_api/data_modeling/space_statistics.py
@@ -44,10 +44,10 @@ async def retrieve(
"""`Retrieve usage data and limits per space `_
Args:
- space (str | SequenceNotStr[str]): The space or spaces to retrieve statistics for.
+ space: The space or spaces to retrieve statistics for.
Returns:
- SpaceStatistics | SpaceStatisticsList | None: The requested statistics and limits for the specified space(s).
+ The requested statistics and limits for the specified space(s).
Examples:
@@ -78,7 +78,7 @@ async def list(self) -> SpaceStatisticsList:
Returns statistics for data modeling resources grouped by each space in the project.
Returns:
- SpaceStatisticsList: The requested statistics and limits for all spaces in the project.
+ The requested statistics and limits for all spaces in the project.
Examples:
diff --git a/cognite/client/_api/data_modeling/spaces.py b/cognite/client/_api/data_modeling/spaces.py
index ebda5c59eb..f65e43b26a 100644
--- a/cognite/client/_api/data_modeling/spaces.py
+++ b/cognite/client/_api/data_modeling/spaces.py
@@ -46,11 +46,11 @@ async def __call__(
Fetches spaces as they are iterated over, so you keep a limited number of spaces in memory.
Args:
- chunk_size (int | None): Number of spaces to return in each chunk. Defaults to yielding one space a time.
- limit (int | None): Maximum number of spaces to return. Defaults to returning all items.
+ chunk_size: Number of spaces to return in each chunk. Defaults to yielding one space a time.
+ limit: Maximum number of spaces to return. Defaults to returning all items.
Yields:
- Space | SpaceList: yields Space one by one if chunk_size is not specified, else SpaceList objects.
+ yields Space one by one if chunk_size is not specified, else SpaceList objects.
""" # noqa: DOC404
async for item in self._list_generator(
list_cls=SpaceList,
@@ -72,10 +72,10 @@ async def retrieve(self, spaces: str | SequenceNotStr[str]) -> Space | SpaceList
"""`Retrieve one or more spaces. `_
Args:
- spaces (str | SequenceNotStr[str]): Space ID
+ spaces: Space ID
Returns:
- Space | SpaceList | None: Requested space or None if it does not exist.
+ Requested space or None if it does not exist.
Examples:
@@ -101,9 +101,9 @@ async def delete(self, spaces: str | SequenceNotStr[str]) -> list[str]:
"""`Delete one or more spaces `_
Args:
- spaces (str | SequenceNotStr[str]): ID or ID list ids of spaces.
+ spaces: ID or list of IDs of the spaces to delete.
Returns:
- list[str]: The space(s) which has been deleted.
+ The space(s) which has been deleted.
Examples:
Delete spaces by id:
@@ -132,11 +132,11 @@ async def list(
"""`List spaces `_
Args:
- limit (int | None): Maximum number of spaces to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
- include_global (bool): Whether to include global spaces. Defaults to False.
+ limit: Maximum number of spaces to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
+ include_global: Whether to include global spaces. Defaults to False.
Returns:
- SpaceList: List of requested spaces
+ List of requested spaces
Examples:
@@ -176,10 +176,10 @@ async def apply(self, spaces: SpaceApply | Sequence[SpaceApply]) -> Space | Spac
"""`Create or patch one or more spaces. `_
Args:
- spaces (SpaceApply | Sequence[SpaceApply]): Space | Sequence[Space]): Space or spaces of spacesda to create or update.
+ spaces: Space or spaces to create or update.
Returns:
- Space | SpaceList: Created space(s)
+ Created space(s)
Examples:
diff --git a/cognite/client/_api/data_modeling/statistics.py b/cognite/client/_api/data_modeling/statistics.py
index ea5264841d..7407670c67 100644
--- a/cognite/client/_api/data_modeling/statistics.py
+++ b/cognite/client/_api/data_modeling/statistics.py
@@ -33,7 +33,7 @@ async def project(self) -> ProjectStatistics:
Returns the usage data and limits for a project's data modelling usage, including data model schemas and graph instances
Returns:
- ProjectStatistics: The requested statistics and limits
+ The requested statistics and limits
Examples:
diff --git a/cognite/client/_api/data_modeling/views.py b/cognite/client/_api/data_modeling/views.py
index c21b97fe2c..c99120a8a5 100644
--- a/cognite/client/_api/data_modeling/views.py
+++ b/cognite/client/_api/data_modeling/views.py
@@ -72,15 +72,15 @@ async def __call__(
Fetches views as they are iterated over, so you keep a limited number of views in memory.
Args:
- chunk_size (int | None): Number of views to return in each chunk. Defaults to yielding one view at a time.
- limit (int | None): Maximum number of views to return. Defaults to returning all items.
- space (str | None): (str | None): The space to query.
- include_inherited_properties (bool): Whether to include properties inherited from views this view implements.
- all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
- include_global (bool): Whether to include global views.
+ chunk_size: Number of views to return in each chunk. Defaults to yielding one view at a time.
+ limit: Maximum number of views to return. Defaults to returning all items.
+ space: The space to query.
+ include_inherited_properties: Whether to include properties inherited from views this view implements.
+ all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
+ include_global: Whether to include global views.
Yields:
- View | ViewList: yields View one by one if chunk_size is not specified, else ViewList objects.
+ yields View one by one if chunk_size is not specified, else ViewList objects.
""" # noqa: DOC404
filter_ = ViewFilter(space, include_inherited_properties, all_versions, include_global)
async for item in self._list_generator(
@@ -109,15 +109,12 @@ async def retrieve(
"""`Retrieve a single view by id. `_
Args:
- ids (ViewIdentifier | Sequence[ViewIdentifier]): The view identifier(s). This can be given as a tuple of
- strings or a ViewId object. For example, ("my_space", "my_view"), ("my_space", "my_view", "my_version"),
- or ViewId("my_space", "my_view", "my_version"). Note that version is optional, if not provided, all versions
- will be returned.
- include_inherited_properties (bool): Whether to include properties inherited from views this view implements.
- all_versions (bool): Whether to return all versions. If false, only the newest version is returned (based on created_time)
+ ids: The view identifier(s). This can be given as a tuple of strings or a ViewId object. For example, ("my_space", "my_view"), ("my_space", "my_view", "my_version"), or ViewId("my_space", "my_view", "my_version"). Note that version is optional, if not provided, all versions will be returned.
+ include_inherited_properties: Whether to include properties inherited from views this view implements.
+ all_versions: Whether to return all versions. If false, only the newest version is returned (based on created_time)
Returns:
- ViewList: Requested view or None if it does not exist.
+ Requested view or None if it does not exist.
Examples:
@@ -144,9 +141,9 @@ async def delete(self, ids: ViewIdentifier | Sequence[ViewIdentifier]) -> list[V
"""`Delete one or more views `_
Args:
- ids (ViewIdentifier | Sequence[ViewIdentifier]): View identifier(s)
+ ids: View identifier(s)
Returns:
- list[ViewId]: The identifier for the view(s) which has been deleted. Empty list if nothing was deleted.
+ The identifier for the view(s) which has been deleted. Empty list if nothing was deleted.
Examples:
Delete views by id:
@@ -178,14 +175,14 @@ async def list(
"""`List views `_
Args:
- limit (int | None): Maximum number of views to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
- space (str | None): (str | None): The space to query.
- include_inherited_properties (bool): Whether to include properties inherited from views this view implements.
- all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
- include_global (bool): Whether to include global views.
+ limit: Maximum number of views to return. Defaults to 10. Set to -1, float("inf") or None to return all items.
+ space: The space to query.
+ include_inherited_properties: Whether to include properties inherited from views this view implements.
+ all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field.
+ include_global: Whether to include global views.
Returns:
- ViewList: List of requested views
+ List of requested views
Examples:
@@ -227,10 +224,10 @@ async def apply(self, view: ViewApply | Sequence[ViewApply]) -> View | ViewList:
"""`Create or update (upsert) one or more views. `_
Args:
- view (ViewApply | Sequence[ViewApply]): View(s) to create or update.
+ view: View(s) to create or update.
Returns:
- View | ViewList: Created view(s)
+ Created view(s)
Examples:
diff --git a/cognite/client/_api/data_sets.py b/cognite/client/_api/data_sets.py
index d1208850e8..cbb422b5cd 100644
--- a/cognite/client/_api/data_sets.py
+++ b/cognite/client/_api/data_sets.py
@@ -67,16 +67,16 @@ async def __call__(
Fetches data sets as they are iterated over, so you keep a limited number of data sets in memory.
Args:
- chunk_size (int | None): Number of data sets to return in each chunk. Defaults to yielding one data set a time.
- metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value.
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
- external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
- write_protected (bool | None): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets.
- limit (int | None): Maximum number of data sets to return. Defaults to return all items.
+ chunk_size: Number of data sets to return in each chunk. Defaults to yielding one data set a time.
+ metadata: Custom, application-specific metadata. String key -> String value.
+ created_time: Range between two timestamps.
+ last_updated_time: Range between two timestamps.
+ external_id_prefix: Filter by this (case-sensitive) prefix for the external ID.
+ write_protected: Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets.
+ limit: Maximum number of data sets to return. Defaults to returning all items.
Yields:
- DataSet | DataSetList: yields DataSet one by one if chunk is not specified, else DataSetList objects.
+ yields DataSet one by one if chunk_size is not specified, else DataSetList objects.
""" # noqa: DOC404
filter = DataSetFilter(
metadata=metadata,
@@ -102,10 +102,10 @@ async def create(
"""`Create one or more data sets. `_
Args:
- data_set (DataSet | DataSetWrite | Sequence[DataSet] | Sequence[DataSetWrite]): Union[DataSet, Sequence[DataSet]]: Data set or list of data sets to create.
+ data_set: Data set or list of data sets to create.
Returns:
- DataSet | DataSetList: Created data set(s)
+ Created data set(s)
Examples:
@@ -126,11 +126,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None)
"""`Retrieve a single data set by id. `_
Args:
- id (int | None): ID
- external_id (str | None): External ID
+ id: ID
+ external_id: External ID
Returns:
- DataSet | None: Requested data set or None if it does not exist.
+ Requested data set or None if it does not exist.
Examples:
@@ -157,12 +157,12 @@ async def retrieve_multiple(
"""`Retrieve multiple data sets by id. `_
Args:
- ids (Sequence[int] | None): IDs
- external_ids (SequenceNotStr[str] | None): External IDs
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ ids: IDs
+ external_ids: External IDs
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Returns:
- DataSetList: The requested data sets.
+ The requested data sets.
Examples:
@@ -186,10 +186,10 @@ async def aggregate_count(self, filter: DataSetFilter | dict[str, Any] | None =
"""`Aggregate data sets `_
Args:
- filter (DataSetFilter | dict[str, Any] | None): Filter on data set filter with exact match
+ filter: Filter on data set filter with exact match
Returns:
- int: Count of data sets matching the filter.
+ Count of data sets matching the filter.
Examples:
@@ -226,11 +226,11 @@ async def update(
"""`Update one or more data sets `_
Args:
- item (DataSet | DataSetWrite | DataSetUpdate | Sequence[DataSet | DataSetWrite | DataSetUpdate]): Data set(s) to update
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DataSet or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: Data set(s) to update
+ mode: How to update data when a non-update object is given (DataSet or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- DataSet | DataSetList: Updated data set(s)
+ Updated data set(s)
Examples:
@@ -265,15 +265,15 @@ async def list(
"""`List data sets `_
Args:
- metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value.
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
- external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
- write_protected (bool | None): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets.
- limit (int | None): Maximum number of data sets to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ metadata: Custom, application-specific metadata. String key -> String value.
+ created_time: Range between two timestamps.
+ last_updated_time: Range between two timestamps.
+ external_id_prefix: Filter by this (case-sensitive) prefix for the external ID.
+ write_protected: Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets.
+ limit: Maximum number of data sets to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- DataSetList: List of requested data sets
+ List of requested data sets
Examples:
diff --git a/cognite/client/_api/datapoints.py b/cognite/client/_api/datapoints.py
index 5c25767080..b6aff2dd50 100644
--- a/cognite/client/_api/datapoints.py
+++ b/cognite/client/_api/datapoints.py
@@ -599,13 +599,13 @@ async def __call__(
No empty chunk is ever returned.
Args:
- queries (DatapointsQuery | Sequence[DatapointsQuery]): Query, or queries, using id, external_id or instance_id for the time series to fetch data for, with individual settings specified. The options 'limit' and 'include_outside_points' are not supported when iterating.
- chunk_size_datapoints (int): The number of datapoints per time series to yield per iteration. Must evenly divide 100k OR be an integer multiple of 100k. Default: 100_000.
- chunk_size_time_series (int | None): The max number of time series to yield per iteration (varies as time series get exhausted, but is never empty). Default: None (all given queries are iterated at the same time).
- return_arrays (bool): Whether to return the datapoints as numpy arrays. Default: True.
+ queries: Query, or queries, using id, external_id or instance_id for the time series to fetch data for, with individual settings specified. The options 'limit' and 'include_outside_points' are not supported when iterating.
+ chunk_size_datapoints: The number of datapoints per time series to yield per iteration. Must evenly divide 100k OR be an integer multiple of 100k. Default: 100_000.
+ chunk_size_time_series: The max number of time series to yield per iteration (varies as time series get exhausted, but is never empty). Default: None (all given queries are iterated at the same time).
+ return_arrays: Whether to return the datapoints as numpy arrays. Default: True.
Yields:
- DatapointsArray | DatapointsArrayList | Datapoints | DatapointsList: If return_arrays=True, a ``DatapointsArray`` object containing the datapoints chunk, or a ``DatapointsArrayList`` if multiple time series were asked for. When False, a ``Datapoints`` object containing the datapoints chunk, or a ``DatapointsList`` if multiple time series were asked for.
+ If return_arrays=True, a ``DatapointsArray`` object containing the datapoints chunk, or a ``DatapointsArrayList`` if multiple time series were asked for. When False, a ``Datapoints`` object containing the datapoints chunk, or a ``DatapointsList`` if multiple time series were asked for.
Examples:
@@ -1005,25 +1005,25 @@ async def retrieve(
`status codes. `_
Args:
- id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, dict (with id) or (mixed) sequence of these. See examples below.
- external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, dict (with external id) or (mixed) sequence of these. See examples below.
- instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id or sequence of instance ids.
- start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC.
- end (int | str | datetime.datetime | None): Exclusive end. Default: "now"
- aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
- granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
- timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute.
- target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
- target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit.
- limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit)
- include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False
- ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False
- include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``.
- ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True.
- treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True.
+ id: Id, dict (with id) or (mixed) sequence of these. See examples below.
+ external_id: External id, dict (with external id) or (mixed) sequence of these. See examples below.
+ instance_id: Instance id or sequence of instance ids.
+ start: Inclusive start. Default: 1970-01-01 UTC.
+ end: Exclusive end. Default: "now"
+ aggregates: Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
+ granularity: The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
+ timezone: For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute.
+ target_unit: The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
+ target_unit_system: The unit system of the datapoints returned. Cannot be used with target_unit.
+ limit: Maximum number of datapoints to return for each time series. Default: None (no limit)
+ include_outside_points: Whether to include outside points. Not allowed when fetching aggregates. Default: False
+ ignore_unknown_ids: Whether to ignore missing time series rather than raising an exception. Default: False
+ include_status: Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``.
+ ignore_bad_datapoints: Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True.
+ treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True.
Returns:
- Datapoints | DatapointsList | None: A ``Datapoints`` object containing the requested data, or a ``DatapointsList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`.
+ A ``Datapoints`` object containing the requested data, or a ``DatapointsList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`.
Examples:
@@ -1356,25 +1356,25 @@ async def retrieve_arrays(
`status codes. `_
Args:
- id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, dict (with id) or (mixed) sequence of these. See examples below.
- external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, dict (with external id) or (mixed) sequence of these. See examples below.
- instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id or sequence of instance ids.
- start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC.
- end (int | str | datetime.datetime | None): Exclusive end. Default: "now"
- aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
- granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
- timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute.
- target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
- target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit.
- limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit)
- include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False
- ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False
- include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``.
- ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True.
- treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True.
+ id: Id, dict (with id) or (mixed) sequence of these. See examples below.
+ external_id: External id, dict (with external id) or (mixed) sequence of these. See examples below.
+ instance_id: Instance id or sequence of instance ids.
+ start: Inclusive start. Default: 1970-01-01 UTC.
+ end: Exclusive end. Default: "now"
+ aggregates: Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
+ granularity: The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
+ timezone: For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute.
+ target_unit: The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
+ target_unit_system: The unit system of the datapoints returned. Cannot be used with target_unit.
+ limit: Maximum number of datapoints to return for each time series. Default: None (no limit)
+ include_outside_points: Whether to include outside points. Not allowed when fetching aggregates. Default: False
+ ignore_unknown_ids: Whether to ignore missing time series rather than raising an exception. Default: False
+ include_status: Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``.
+ ignore_bad_datapoints: Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True.
+ treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True.
Returns:
- DatapointsArray | DatapointsArrayList | None: A ``DatapointsArray`` object containing the requested data, or a ``DatapointsArrayList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`.
+ A ``DatapointsArray`` object containing the requested data, or a ``DatapointsArrayList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested, and it is not found, the function will return `None`.
Note:
For many more usage examples, check out the :py:meth:`~DatapointsAPI.retrieve` method which accepts exactly the same arguments.
@@ -1491,29 +1491,29 @@ async def retrieve_dataframe(
For many more usage examples, check out the :py:meth:`~DatapointsAPI.retrieve` method which accepts exactly the same arguments.
Args:
- id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, DatapointsQuery or (mixed) sequence of these. See examples.
- external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, DatapointsQuery or (mixed) sequence of these. See examples.
- instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id, DatapointsQuery or (mixed) sequence of these. See examples.
- start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC.
- end (int | str | datetime.datetime | None): Exclusive end. Default: "now"
- aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
- granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
- timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, -day or -month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute.
- target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
- target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit.
- limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit)
- include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False
- ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False
- ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True.
- treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True.
- uniform_index (bool): If only querying aggregates AND a single granularity is used (that's NOT a calendar granularity like month/quarter/year) AND no limit is used AND no timezone is used, specifying `uniform_index=True` will return a dataframe with an equidistant datetime index from the earliest `start` to the latest `end` (missing values will be NaNs). If these requirements are not met, a ValueError is raised. Default: False
- include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. Also adds the status info as a separate level in the columns (MultiIndex).
- include_unit (bool): Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level)
- include_aggregate_name (bool): Include aggregate in the dataframe columns, if present (separate MultiIndex level)
- include_granularity_name (bool): Include granularity in the dataframe columns, if present (separate MultiIndex level)
+ id: Id, DatapointsQuery or (mixed) sequence of these. See examples.
+ external_id: External id, DatapointsQuery or (mixed) sequence of these. See examples.
+ instance_id: Instance id, DatapointsQuery or (mixed) sequence of these. See examples.
+ start: Inclusive start. Default: 1970-01-01 UTC.
+ end: Exclusive end. Default: "now"
+ aggregates: Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned)
+ granularity: The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None.
+ timezone: For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute.
+ target_unit: The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
+ target_unit_system: The unit system of the datapoints returned. Cannot be used with target_unit.
+ limit: Maximum number of datapoints to return for each time series. Default: None (no limit)
+ include_outside_points: Whether to include outside points. Not allowed when fetching aggregates. Default: False
+ ignore_unknown_ids: Whether to ignore missing time series rather than raising an exception. Default: False
+ ignore_bad_datapoints: Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True.
+ treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True.
+ uniform_index: If only querying aggregates AND a single granularity is used (that's NOT a calendar granularity like month/quarter/year) AND no limit is used AND no timezone is used, specifying `uniform_index=True` will return a dataframe with an equidistant datetime index from the earliest `start` to the latest `end` (missing values will be NaNs). If these requirements are not met, a ValueError is raised. Default: False
+ include_status: Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. Also adds the status info as a separate level in the columns (MultiIndex).
+ include_unit: Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level)
+ include_aggregate_name: Include aggregate in the dataframe columns, if present (separate MultiIndex level)
+ include_granularity_name: Include granularity in the dataframe columns, if present (separate MultiIndex level)
Returns:
- pd.DataFrame: A pandas DataFrame containing the requested time series. The ordering of columns is ids first, then external_ids, and lastly instance_ids. For time series with multiple aggregates, they will be sorted in alphabetical order ("average" before "max").
+ A pandas DataFrame containing the requested time series. The ordering of columns is ids first, then external_ids, and lastly instance_ids. For time series with multiple aggregates, they will be sorted in alphabetical order ("average" before "max").
Tip:
Pandas DataFrames have one shared index, so when you fetch datapoints from multiple time series, the final index will be
@@ -1819,19 +1819,19 @@ async def retrieve_latest(
`status codes. `_
Args:
- id (int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None): Id or list of ids.
- external_id (str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None): External id or list of external ids.
- instance_id (NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None): Instance id or list of instance ids.
- before (None | int | str | datetime.datetime): Get latest datapoint before this time. Not used when passing 'LatestDatapointQuery'.
- target_unit (str | None): The unit_external_id of the datapoint returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
- target_unit_system (str | None): The unit system of the datapoint returned. Cannot be used with target_unit.
- include_status (bool): Also return the status code, an integer, for each datapoint in the response.
- ignore_bad_datapoints (bool): Prevent datapoints with a bad status code to be returned. Default: True.
- treat_uncertain_as_bad (bool): Treat uncertain status codes as bad. If false, treat uncertain as good. Default: True.
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ id: Id or list of ids.
+ external_id: External id or list of external ids.
+ instance_id: Instance id or list of instance ids.
+ before: Get latest datapoint before this time. Not used when passing 'LatestDatapointQuery'.
+ target_unit: The unit_external_id of the datapoint returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system.
+ target_unit_system: The unit system of the datapoint returned. Cannot be used with target_unit.
+ include_status: Also return the status code, an integer, for each datapoint in the response.
+ ignore_bad_datapoints: Prevent datapoints with a bad status code from being returned. Default: True.
+ treat_uncertain_as_bad: Treat uncertain status codes as bad. If false, treat uncertain as good. Default: True.
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throwing an exception.
Returns:
- Datapoints | DatapointsList | None: A Datapoints object containing the requested data, or a DatapointsList if multiple were requested. If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`.
+ A Datapoints object containing the requested data, or a DatapointsList if multiple were requested. If `ignore_unknown_ids` is `True`, a single time series is requested, and it is not found, the function will return `None`.
Examples:
@@ -1937,10 +1937,10 @@ async def insert(
`status codes. `_
Args:
- datapoints (Datapoints | DatapointsArray | Sequence[dict[str, int | float | str | datetime.datetime]] | Sequence[tuple[int | float | datetime.datetime, int | float | str] | tuple[int | float | datetime.datetime, int | float | str, int]]): The datapoints you wish to insert. Can either be a list of tuples, a list of dictionaries, a Datapoints object or a DatapointsArray object. See examples below.
- id (int | None): Id of time series to insert datapoints into.
- external_id (str | None): External id of time series to insert datapoint into.
- instance_id (NodeId | None): Instance ID of time series to insert datapoints into.
+ datapoints: The datapoints you wish to insert. Can either be a list of tuples, a list of dictionaries, a Datapoints object or a DatapointsArray object. See examples below.
+ id: Id of time series to insert datapoints into.
+ external_id: External id of time series to insert datapoints into.
+ instance_id: Instance ID of time series to insert datapoints into.
Note:
All datapoints inserted without a status code (or symbol) is assumed to be good (code 0). To mark a value, pass
@@ -2024,7 +2024,7 @@ async def insert_multiple(
`status codes. `_
Args:
- datapoints (list[dict[str, str | int | list | Datapoints | DatapointsArray | NodeId]]): The datapoints you wish to insert along with the ids of the time series. See examples below.
+ datapoints: The datapoints you wish to insert along with the ids of the time series. See examples below.
Note:
All datapoints inserted without a status code (or symbol) is assumed to be good (code 0). To mark a value, pass
@@ -2097,11 +2097,11 @@ async def delete_range(
"""Delete a range of datapoints from a time series.
Args:
- start (int | str | datetime.datetime): Inclusive start of delete range
- end (int | str | datetime.datetime): Exclusive end of delete range
- id (int | None): Id of time series to delete data from
- external_id (str | None): External id of time series to delete data from
- instance_id (NodeId | None): Instance ID of time series to delete data from
+ start: Inclusive start of delete range
+ end: Exclusive end of delete range
+ id: Id of time series to delete data from
+ external_id: External id of time series to delete data from
+ instance_id: Instance ID of time series to delete data from
Examples:
@@ -2129,7 +2129,7 @@ async def delete_ranges(self, ranges: list[dict[str, Any]]) -> None:
"""`Delete a range of datapoints from multiple time series. `_
Args:
- ranges (list[dict[str, Any]]): The list of datapoint ids along with time range to delete. See examples below.
+ ranges: The list of datapoint ids along with time range to delete. See examples below.
Examples:
@@ -2171,8 +2171,8 @@ async def insert_dataframe(self, df: pd.DataFrame, dropna: bool = True) -> None:
The column identifiers must be unique.
Args:
- df (pd.DataFrame): Pandas DataFrame object containing the time series.
- dropna (bool): Set to True to ignore NaNs in the given DataFrame, applied per column. Default: True.
+ df: Pandas DataFrame object containing the time series.
+ dropna: Set to True to ignore NaNs in the given DataFrame, applied per column. Default: True.
Warning:
You can not insert datapoints with status codes using this method (``insert_dataframe``), you'll need
diff --git a/cognite/client/_api/datapoints_subscriptions.py b/cognite/client/_api/datapoints_subscriptions.py
index b1d7f6eaa4..6dff93f609 100644
--- a/cognite/client/_api/datapoints_subscriptions.py
+++ b/cognite/client/_api/datapoints_subscriptions.py
@@ -50,11 +50,11 @@ async def __call__(
"""Iterate over all datapoint subscriptions.
Args:
- chunk_size (int | None): The number of datapoint subscriptions to fetch per request. Defaults to yielding one datapoint subscription at a time.
- limit (int | None): Maximum number of items to return. Defaults to return all datapoint subscriptions.
+ chunk_size: The number of datapoint subscriptions to fetch per request. Defaults to yielding one datapoint subscription at a time.
+ limit: Maximum number of items to return. Defaults to returning all datapoint subscriptions.
Yields:
- DatapointSubscription | DatapointSubscriptionList: Yields datapoint subscriptions one by one if chunk is not specified, otherwise returns a list of datapoint subscriptions.
+ Yields datapoint subscriptions one by one if chunk_size is not specified, otherwise yields lists of datapoint subscriptions.
""" # noqa: DOC404
async for item in self._list_generator(
method="GET",
@@ -71,10 +71,10 @@ async def create(self, subscription: DataPointSubscriptionWrite) -> DatapointSub
Create a subscription that can be used to listen for changes in data points for a set of time series.
Args:
- subscription (DataPointSubscriptionWrite): Subscription to create.
+ subscription: Subscription to create.
Returns:
- DatapointSubscription: Created subscription
+ Created subscription
Examples:
@@ -130,8 +130,8 @@ async def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_id
"""`Delete subscription(s). This operation cannot be undone. `_
Args:
- external_id (str | SequenceNotStr[str]): External ID or list of external IDs of subscriptions to delete.
- ignore_unknown_ids (bool): Whether to ignore IDs and external IDs that are not found rather than throw an exception.
+ external_id: External ID or list of external IDs of subscriptions to delete.
+ ignore_unknown_ids: Whether to ignore IDs and external IDs that are not found rather than throwing an exception.
Examples:
@@ -153,10 +153,10 @@ async def retrieve(self, external_id: str) -> DatapointSubscription | None:
"""`Retrieve one subscription by external ID. `_
Args:
- external_id (str): External ID of the subscription to retrieve.
+ external_id: External ID of the subscription to retrieve.
Returns:
- DatapointSubscription | None: The requested subscription.
+ The requested subscription.
Examples:
@@ -186,11 +186,11 @@ async def list_member_time_series(
Retrieve a list of time series (IDs) that the subscription is currently retrieving updates from
Args:
- external_id (str): External ID of the subscription to retrieve members of.
- limit (int | None): Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ external_id: External ID of the subscription to retrieve members of.
+ limit: Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- TimeSeriesIDList: List of time series in the subscription.
+ List of time series in the subscription.
Examples:
@@ -224,11 +224,11 @@ async def update(
Furthermore, the subscription partition cannot be changed.
Args:
- update (DataPointSubscriptionUpdate | DataPointSubscriptionWrite): The subscription update.
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DataPointSubscriptionWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing.
+ update: The subscription update.
+ mode: How to update data when a non-update object is given (DataPointSubscriptionWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing.
Returns:
- DatapointSubscription: Updated subscription.
+ Updated subscription.
Examples:
@@ -279,18 +279,18 @@ async def iterate_data(
older than 7 days may be discarded.
Args:
- external_id (str): The external ID of the subscription.
- start (str | None): When to start the iteration. If set to None, the iteration will start from the beginning. The format is "N[timeunit]-ago", where timeunit is w,d,h,m (week, day, hour, minute). For example, "12h-ago" will start the iteration from 12 hours ago. You can also set it to "now" to jump straight to the end. Defaults to None.
- limit (int): Approximate number of results to return across all partitions.
- partition (int): The partition to iterate over. Defaults to 0.
- poll_timeout (int): How many seconds to wait for new data, until an empty response is sent. Defaults to 5.
- cursor (str | None): Optional cursor to start iterating from.
- include_status (bool): Also return the status code, an integer, for each datapoint in the response.
- ignore_bad_datapoints (bool): Do not return bad datapoints. Default: True.
- treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Default: True.
+ external_id: The external ID of the subscription.
+ start: When to start the iteration. If set to None, the iteration will start from the beginning. The format is "N[timeunit]-ago", where timeunit is w,d,h,m (week, day, hour, minute). For example, "12h-ago" will start the iteration from 12 hours ago. You can also set it to "now" to jump straight to the end. Defaults to None.
+ limit: Approximate number of results to return across all partitions.
+ partition: The partition to iterate over. Defaults to 0.
+ poll_timeout: How many seconds to wait for new data, until an empty response is sent. Defaults to 5.
+ cursor: Optional cursor to start iterating from.
+ include_status: Also return the status code, an integer, for each datapoint in the response.
+ ignore_bad_datapoints: Do not return bad datapoints. Default: True.
+ treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Default: True.
Yields:
- DatapointSubscriptionBatch: Changes to the subscription and data in the subscribed time series.
+ Changes to the subscription and data in the subscribed time series.
Examples:
@@ -347,9 +347,9 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DatapointSubscri
"""`List data point subscriptions `_
Args:
- limit (int | None): Maximum number of subscriptions to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ limit: Maximum number of subscriptions to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- DatapointSubscriptionList: List of requested datapoint subscriptions
+ List of requested datapoint subscriptions
Examples:
diff --git a/cognite/client/_api/diagrams.py b/cognite/client/_api/diagrams.py
index 4751a78315..c9d11c71bb 100644
--- a/cognite/client/_api/diagrams.py
+++ b/cognite/client/_api/diagrams.py
@@ -137,19 +137,19 @@ async def detect(
are able to access the data sent to this endpoint.
Args:
- entities (Sequence[dict | CogniteResource]): List of entities to detect
- search_field (str): If entities is a list of dictionaries, this is the key to the values to detect in the PnId
- partial_match (bool): Allow for a partial match (e.g. missing prefix).
- min_tokens (int): Minimal number of tokens a match must be based on
- file_ids (int | Sequence[int] | None): ID of the files, should already be uploaded in the same tenant.
- file_external_ids (str | SequenceNotStr[str] | None): File external ids, alternative to file_ids and file_references.
- file_instance_ids (NodeId | Sequence[NodeId] | None): Files to detect in, specified by instance id.
- file_references (list[FileReference] | FileReference | None): File references (id, external_id or instance_id), and first_page and last_page to specify page ranges per file. Each reference can specify up to 50 pages. Providing a page range will also make the page count of the document a part of the response.
- pattern_mode (bool | None): If True, entities must be provided with a sample field. This enables detecting tags that are similar to the sample, but not necessarily identical. Defaults to None.
- configuration (DiagramDetectConfig | None): Additional configuration for the detect algorithm. See `DiagramDetectConfig` class documentation and `beta API docs `_.
- multiple_jobs (bool): Enables you to publish multiple jobs. If True the method returns a tuple of DetectJobBundle and list of potentially unposted files. If False it will return a single DiagramDetectResults. Defaults to False.
+ entities: List of entities to detect
+ search_field: If entities is a list of dictionaries, this is the key to the values to detect in the PnId
+ partial_match: Allow for a partial match (e.g. missing prefix).
+ min_tokens: Minimal number of tokens a match must be based on
+ file_ids: ID of the files, should already be uploaded in the same tenant.
+ file_external_ids: File external ids, alternative to file_ids and file_references.
+ file_instance_ids: Files to detect in, specified by instance id.
+ file_references: File references (id, external_id or instance_id), and first_page and last_page to specify page ranges per file. Each reference can specify up to 50 pages. Providing a page range will also make the page count of the document a part of the response.
+ pattern_mode: If True, entities must be provided with a sample field. This enables detecting tags that are similar to the sample, but not necessarily identical. Defaults to None.
+ configuration: Additional configuration for the detect algorithm. See `DiagramDetectConfig` class documentation and `beta API docs `_.
+ multiple_jobs: Enables you to publish multiple jobs. If True the method returns a tuple of DetectJobBundle and list of potentially unposted files. If False it will return a single DiagramDetectResults. Defaults to False.
Returns:
- DiagramDetectResults | tuple[DetectJobBundle, list[dict[str, Any]]]: Resulting queued job or a bundle of jobs and a list of unposted files. Note that the .result property of the job or job bundle will block waiting for results.
+ Resulting queued job or a bundle of jobs and a list of unposted files. Note that the .result property of the job or job bundle will block waiting for results.
Note:
The results are not written to CDF, to create annotations based on detected entities use `AnnotationsAPI`.
@@ -319,10 +319,10 @@ async def convert(self, detect_job: DiagramDetectResults) -> DiagramConvertResul
Will automatically wait for the detect job to complete before starting the conversion.
Args:
- detect_job (DiagramDetectResults): detect job
+ detect_job: detect job
Returns:
- DiagramConvertResults: Resulting queued job.
+ Resulting queued job.
Examples:
diff --git a/cognite/client/_api/document_preview.py b/cognite/client/_api/document_preview.py
index d86ab11d44..2e1b88d0bf 100644
--- a/cognite/client/_api/document_preview.py
+++ b/cognite/client/_api/document_preview.py
@@ -14,11 +14,11 @@ async def download_page_as_png_bytes(self, id: int, page_number: int = 1) -> byt
"""`Downloads an image preview for a specific page of the specified document. `_
Args:
- id (int): The server-generated ID for the document you want to retrieve the preview of.
- page_number (int): Page number to preview. Starting at 1 for first page.
+ id: The server-generated ID for the document you want to retrieve the preview of.
+ page_number: Page number to preview. Starting at 1 for first page.
Returns:
- bytes: The png preview of the document.
+ The png preview of the document.
Examples:
@@ -48,10 +48,10 @@ async def download_page_as_png(
"""`Downloads an image preview for a specific page of the specified document. `_
Args:
- path (Path | str | IO): The path to save the png preview of the document. If the path is a directory, the file name will be '[id]_page[page_number].png'.
- id (int): The server-generated ID for the document you want to retrieve the preview of.
- page_number (int): Page number to preview. Starting at 1 for first page.
- overwrite (bool): Whether to overwrite existing file at the given path. Defaults to False.
+ path: The path to save the png preview of the document. If the path is a directory, the file name will be '[id]_page[page_number].png'.
+ id: The server-generated ID for the document you want to retrieve the preview of.
+ page_number: Page number to preview. Starting at 1 for first page.
+ overwrite: Whether to overwrite existing file at the given path. Defaults to False.
Examples:
@@ -84,10 +84,10 @@ async def download_document_as_pdf_bytes(self, id: int) -> bytes:
Previews will be rendered if necessary during the request. Be prepared for the request to take a few seconds to complete.
Args:
- id (int): The server-generated ID for the document you want to retrieve the preview of.
+ id: The server-generated ID for the document you want to retrieve the preview of.
Returns:
- bytes: The pdf preview of the document.
+ The pdf preview of the document.
Examples:
@@ -111,9 +111,9 @@ async def download_document_as_pdf(self, path: Path | str | IO, id: int, overwri
Previews will be rendered if necessary during the request. Be prepared for the request to take a few seconds to complete.
Args:
- path (Path | str | IO): The path to save the pdf preview of the document. If the path is a directory, the file name will be '[id].pdf'.
- id (int): The server-generated ID for the document you want to retrieve the preview of.
- overwrite (bool): Whether to overwrite existing file at the given path. Defaults to False.
+ path: The path to save the pdf preview of the document. If the path is a directory, the file name will be '[id].pdf'.
+ id: The server-generated ID for the document you want to retrieve the preview of.
+ overwrite: Whether to overwrite existing file at the given path. Defaults to False.
Examples:
@@ -144,10 +144,10 @@ async def retrieve_pdf_link(self, id: int) -> TemporaryLink:
"""`Retrieve a Temporary link to download pdf preview `_
Args:
- id (int): The server-generated ID for the document you want to retrieve the preview of.
+ id: The server-generated ID for the document you want to retrieve the preview of.
Returns:
- TemporaryLink: A temporary link to download the pdf preview.
+ A temporary link to download the pdf preview.
Examples:
diff --git a/cognite/client/_api/documents.py b/cognite/client/_api/documents.py
index 16396c615d..9aeb8f1e45 100644
--- a/cognite/client/_api/documents.py
+++ b/cognite/client/_api/documents.py
@@ -66,13 +66,13 @@ async def __call__(
Fetches documents as they are iterated over, so you keep a limited number of documents in memory.
Args:
- chunk_size (int | None): Number of documents to return in each chunk. Defaults to yielding one document at a time.
- filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to return.
- sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending.
- limit (int | None): Maximum number of documents to return. Default to return all items.
+ chunk_size: Number of documents to return in each chunk. Defaults to yielding one document at a time.
+ filter: The filter to narrow down the documents to return.
+ sort: The property to sort by. The default order is ascending.
+ limit: Maximum number of documents to return. Defaults to returning all items.
Yields:
- Document | DocumentList: yields Documents one by one if chunk_size is not specified, else DocumentList objects.
+ yields Documents one by one if chunk_size is not specified, else DocumentList objects.
""" # noqa: DOC404
self._validate_filter(filter)
async for item in self._list_generator(
@@ -90,11 +90,11 @@ async def aggregate_count(self, query: str | None = None, filter: Filter | dict[
"""`Count of documents matching the specified filters and search. `_
Args:
- query (str | None): The free text search query, for details see the documentation referenced above.
- filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count.
+ query: The free text search query, for details see the documentation referenced above.
+ filter: The filter to narrow down the documents to count.
Returns:
- int: The number of documents matching the specified filters and search.
+ The number of documents matching the specified filters and search.
Examples:
@@ -137,13 +137,13 @@ async def aggregate_cardinality_values(
"""`Find approximate property count for documents. `_
Args:
- property (DocumentProperty | SourceFileProperty | list[str] | str): The property to count the cardinality of.
- query (str | None): The free text search query, for details see the documentation referenced above.
- filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ property: The property to count the cardinality of.
+ query: The free text search query, for details see the documentation referenced above.
+ filter: The filter to narrow down the documents to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
Returns:
- int: The number of documents matching the specified filters and search.
+ The number of documents matching the specified filters and search.
Examples:
@@ -190,13 +190,13 @@ async def aggregate_cardinality_properties(
"""`Find approximate paths count for documents. `_
Args:
- path (SourceFileProperty | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["sourceFile", "metadata"]. It means to aggregate only metadata properties (aka keys).
- query (str | None): The free text search query, for details see the documentation referenced above.
- filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ path: The scope in every document to aggregate properties. The only value allowed now is ["sourceFile", "metadata"]. It means to aggregate only metadata properties (aka keys).
+ query: The free text search query, for details see the documentation referenced above.
+ filter: The filter to narrow down the documents to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
Returns:
- int: The number of documents matching the specified filters and search.
+ The number of documents matching the specified filters and search.
Examples:
@@ -228,14 +228,14 @@ async def aggregate_unique_values(
"""`Get unique properties with counts for documents. `_
Args:
- property (DocumentProperty | SourceFileProperty | list[str] | str): The property to group by.
- query (str | None): The free text search query, for details see the documentation referenced above.
- filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- limit (int): Maximum number of items. Defaults to 25.
+ property: The property to group by.
+ query: The free text search query, for details see the documentation referenced above.
+ filter: The filter to narrow down the documents to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ limit: Maximum number of items. Defaults to 25.
Returns:
- UniqueResultList: List of unique values of documents matching the specified filters and search.
+ List of unique values of documents matching the specified filters and search.
Examples:
@@ -286,14 +286,14 @@ async def aggregate_unique_properties(
"""`Get unique paths with counts for documents. `_
Args:
- path (DocumentProperty | SourceFileProperty | list[str] | str): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
- query (str | None): The free text search query, for details see the documentation referenced above.
- filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- limit (int): Maximum number of items. Defaults to 25.
+ path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+ query: The free text search query, for details see the documentation referenced above.
+ filter: The filter to narrow down the documents to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ limit: Maximum number of items. Defaults to 25.
Returns:
- UniqueResultList: List of unique values of documents matching the specified filters and search.
+ List of unique values of documents matching the specified filters and search.
Examples:
@@ -334,12 +334,12 @@ async def retrieve_content(
you can use this endpoint.
Args:
- id (int | None): The server-generated ID for the document you want to retrieve the content of.
- external_id (str | None): External ID of the document.
- instance_id (NodeId | None): Instance ID of the document.
+ id: The server-generated ID for the document you want to retrieve the content of.
+ external_id: External ID of the document.
+ instance_id: Instance ID of the document.
Returns:
- bytes: The content of the document.
+ The content of the document.
Examples:
@@ -386,10 +386,10 @@ async def retrieve_content_buffer(
you can use this endpoint.
Args:
- buffer (BinaryIO): The document content is streamed directly into the buffer. This is useful for retrieving large documents.
- id (int | None): The server-generated ID for the document you want to retrieve the content of.
- external_id (str | None): External ID of the document.
- instance_id (NodeId | None): Instance ID of the document.
+ buffer: The document content is streamed directly into the buffer. This is useful for retrieving large documents.
+ id: The server-generated ID for the document you want to retrieve the content of.
+ external_id: External ID of the document.
+ instance_id: Instance ID of the document.
Examples:
@@ -457,14 +457,14 @@ async def search(
endpoint documentation referenced above.
Args:
- query (str): The free text search query.
- highlight (bool): Whether or not matches in search results should be highlighted.
- filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to search.
- sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending.
- limit (int): Maximum number of items to return. When using highlights, the maximum value is reduced to 20. Defaults to 25.
+ query: The free text search query.
+ highlight: Whether or not matches in search results should be highlighted.
+ filter: The filter to narrow down the documents to search.
+ sort: The property to sort by. The default order is ascending.
+ limit: Maximum number of items to return. When using highlights, the maximum value is reduced to 20. Defaults to 25.
Returns:
- DocumentList | DocumentHighlightList: List of search results. If highlight is True, a DocumentHighlightList is returned, otherwise a DocumentList is returned.
+ List of search results. If highlight is True, a DocumentHighlightList is returned, otherwise a DocumentList is returned.
Examples:
@@ -526,12 +526,12 @@ async def list(
project.
Args:
- filter (Filter | dict[str, Any] | None): Filter | dict[str, Any] | None): The filter to narrow down the documents to return.
- sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending.
- limit (int | None): Maximum number of documents to return. Defaults to 25. Set to None or -1 to return all documents.
+ filter: The filter to narrow down the documents to return.
+ sort: The property to sort by. The default order is ascending.
+ limit: Maximum number of documents to return. Defaults to 25. Set to None or -1 to return all documents.
Returns:
- DocumentList: List of documents
+ List of documents
Examples:
diff --git a/cognite/client/_api/entity_matching.py b/cognite/client/_api/entity_matching.py
index 454e503b6b..ac3ffd881e 100644
--- a/cognite/client/_api/entity_matching.py
+++ b/cognite/client/_api/entity_matching.py
@@ -29,11 +29,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None)
"""`Retrieve model `_
Args:
- id (int | None): id of the model to retrieve.
- external_id (str | None): external id of the model to retrieve.
+ id: id of the model to retrieve.
+ external_id: external id of the model to retrieve.
Returns:
- EntityMatchingModel | None: Model requested.
+ Model requested.
Examples:
>>> from cognite.client import CogniteClient, AsyncCogniteClient
@@ -53,11 +53,11 @@ async def retrieve_multiple(
"""`Retrieve models `_
Args:
- ids (Sequence[int] | None): ids of the model to retrieve.
- external_ids (SequenceNotStr[str] | None): external ids of the model to retrieve.
+ ids: ids of the model to retrieve.
+ external_ids: external ids of the model to retrieve.
Returns:
- EntityMatchingModelList: Models requested.
+ Models requested.
Examples:
>>> from cognite.client import CogniteClient, AsyncCogniteClient
@@ -81,11 +81,11 @@ async def update(
"""`Update model `_
Args:
- item (EntityMatchingModel | EntityMatchingModelUpdate | Sequence[EntityMatchingModel | EntityMatchingModelUpdate]): Model(s) to update
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (EntityMatchingModel). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: Model(s) to update
+ mode: How to update data when a non-update object is given (EntityMatchingModel). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- EntityMatchingModelList | EntityMatchingModel: No description.
+ The updated model(s).
Examples:
>>> from cognite.client.data_classes.contextualization import EntityMatchingModelUpdate
@@ -114,15 +114,15 @@ async def list(
"""`List models `_
Args:
- name (str | None): Optional user-defined name of model.
- description (str | None): Optional user-defined description of model.
- original_id (int | None): id of the original model for models that were created with refit.
- feature_type (str | None): feature type that defines the combination of features used.
- classifier (str | None): classifier used in training.
- limit (int | None): Maximum number of items to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ name: Optional user-defined name of model.
+ description: Optional user-defined description of model.
+ original_id: id of the original model for models that were created with refit.
+ feature_type: feature type that defines the combination of features used.
+ classifier: classifier used in training.
+ limit: Maximum number of items to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- EntityMatchingModelList: List of models.
+ List of models.
Examples:
>>> from cognite.client import CogniteClient, AsyncCogniteClient
@@ -156,7 +156,7 @@ async def list_jobs(self) -> ContextualizationJobList:
"""List jobs, typically model fit and predict runs.
Returns:
- ContextualizationJobList: List of jobs.
+ List of jobs.
"""
return ContextualizationJobList._load(
unpack_items(await self._get(self._RESOURCE_PATH + "/jobs", semaphore=self._get_semaphore("read")))
@@ -171,8 +171,8 @@ async def delete(
Args:
- id (int | Sequence[int] | None): Id or list of ids
- external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
+ id: Id or list of ids
+ external_id: External ID or list of external ids
Examples:
>>> from cognite.client import CogniteClient, AsyncCogniteClient
>>> client = CogniteClient()
@@ -206,18 +206,18 @@ async def fit(
capabilities in the project, are able to access the data sent to this endpoint.
Args:
- sources (Sequence[dict | CogniteResource]): entities to match from, should have an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). Metadata fields are automatically flattened to "metadata.key" entries, such that they can be used in match_fields.
- targets (Sequence[dict | CogniteResource]): entities to match to, should have an 'id' field. Tolerant to passing more than is needed or used.
- true_matches (Sequence[dict | tuple[int | str, int | str]] | None): Known valid matches given as a list of dicts with keys 'sourceId', 'sourceExternalId', 'targetId', 'targetExternalId'). If omitted, uses an unsupervised model. A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type.
- match_fields (dict | Sequence[tuple[str, str]] | None): List of (from,to) keys to use in matching. Default in the API is [('name','name')]. Also accepts {"source": .., "target": ..}.
- feature_type (str | None): feature type that defines the combination of features used, see API docs for details.
- classifier (str | None): classifier used in training.
- ignore_missing_fields (bool): whether missing data in match_fields should return error or be filled in with an empty string.
- name (str | None): Optional user-defined name of model.
- description (str | None): Optional user-defined description of model.
- external_id (str | None): Optional external id. Must be unique within the project.
+ sources: entities to match from, should have an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). Metadata fields are automatically flattened to "metadata.key" entries, such that they can be used in match_fields.
+ targets: entities to match to, should have an 'id' field. Tolerant to passing more than is needed or used.
+ true_matches: Known valid matches given as a list of dicts with keys 'sourceId', 'sourceExternalId', 'targetId', 'targetExternalId'). If omitted, uses an unsupervised model. A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type.
+ match_fields: List of (from,to) keys to use in matching. Default in the API is [('name','name')]. Also accepts {"source": .., "target": ..}.
+ feature_type: feature type that defines the combination of features used, see API docs for details.
+ classifier: classifier used in training.
+ ignore_missing_fields: whether missing data in match_fields should return error or be filled in with an empty string.
+ name: Optional user-defined name of model.
+ description: Optional user-defined description of model.
+ external_id: Optional external id. Must be unique within the project.
Returns:
- EntityMatchingModel: Resulting queued model.
+ Resulting queued model.
Example:
>>> from cognite.client import CogniteClient, AsyncCogniteClient
@@ -279,15 +279,15 @@ async def predict(
capabilities in the project, are able to access the data sent to this endpoint.
Args:
- sources (Sequence[dict] | None): entities to match from, does not need an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). If omitted, will use data from fit.
- targets (Sequence[dict] | None): entities to match to, does not need an 'id' field. Tolerant to passing more than is needed or used. If omitted, will use data from fit.
- num_matches (int): number of matches to return for each item.
- score_threshold (float | None): only return matches with a score above this threshold
- id (int | None): id of the model to use.
- external_id (str | None): external id of the model to use.
+ sources: entities to match from, does not need an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). If omitted, will use data from fit.
+ targets: entities to match to, does not need an 'id' field. Tolerant to passing more than is needed or used. If omitted, will use data from fit.
+ num_matches: number of matches to return for each item.
+ score_threshold: only return matches with a score above this threshold
+ id: id of the model to use.
+ external_id: external id of the model to use.
Returns:
- EntityMatchingPredictionResult: object which can be used to wait for and retrieve results.
+ Object which can be used to wait for and retrieve results.
Examples:
>>> from cognite.client import CogniteClient, AsyncCogniteClient
@@ -334,11 +334,11 @@ async def refit(
capabilities in the project, are able to access the data sent to this endpoint.
Args:
- true_matches (Sequence[dict | tuple[int | str, int | str]]): Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'). A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type.
- id (int | None): id of the model to use.
- external_id (str | None): external id of the model to use.
+ true_matches: Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'). A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type.
+ id: id of the model to use.
+ external_id: external id of the model to use.
Returns:
- EntityMatchingModel: new model refitted to true_matches.
+ New model refitted to true_matches.
Examples:
>>> from cognite.client import CogniteClient, AsyncCogniteClient
diff --git a/cognite/client/_api/events.py b/cognite/client/_api/events.py
index 9f5873c3be..642a910e76 100644
--- a/cognite/client/_api/events.py
+++ b/cognite/client/_api/events.py
@@ -113,29 +113,29 @@ async def __call__(
Fetches events as they are iterated over, so you keep a limited number of events in memory.
Args:
- chunk_size (int | None): Number of events to return in each chunk. Defaults to yielding one event a time.
- start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps
- end_time (dict[str, Any] | EndTimeFilter | None): Range between two timestamps
- active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified.
- type (str | None): Type of the event, e.g 'failure'.
- subtype (str | None): Subtype of the event, e.g 'electrical'.
- metadata (dict[str, str] | None): Customizable extra data about the event. String key -> String value.
- asset_ids (Sequence[int] | None): Asset IDs of related equipments that this event relates to.
- asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this event relates to.
- asset_subtree_ids (int | Sequence[int] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- data_set_ids (int | Sequence[int] | None): Return only events in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only events in the specified data set(s) with this external id / these external ids.
- source (str | None): The source of this event.
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- external_id_prefix (str | None): External Id provided by client. Should be unique within the project
- sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
- limit (int | None): Maximum number of events to return. Defaults to return all items.
- advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
+ chunk_size: Number of events to return in each chunk. Defaults to yielding one event a time.
+ start_time: Range between two timestamps
+ end_time: Range between two timestamps
+ active_at_time: Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified.
+ type: Type of the event, e.g 'failure'.
+ subtype: Subtype of the event, e.g 'electrical'.
+ metadata: Customizable extra data about the event. String key -> String value.
+ asset_ids: Asset IDs of related equipments that this event relates to.
+ asset_external_ids: Asset External IDs of related equipment that this event relates to.
+ asset_subtree_ids: Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids: Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids: Return only events in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only events in the specified data set(s) with this external id / these external ids.
+ source: The source of this event.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ external_id_prefix: External Id provided by client. Should be unique within the project
+ sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+ limit: Maximum number of events to return. Defaults to returning all items.
+ advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
Yields:
- Event | EventList: yields Event one by one if chunk_size is not specified, else EventList objects.
+ yields Event one by one if chunk_size is not specified, else EventList objects.
""" # noqa: DOC404
asset_subtree_ids_processed = process_asset_subtree_ids(asset_subtree_ids, asset_subtree_external_ids)
data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids)
@@ -176,11 +176,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None)
"""`Retrieve a single event by id. `_
Args:
- id (int | None): ID
- external_id (str | None): External ID
+ id: ID
+ external_id: External ID
Returns:
- Event | None: Requested event or None if it does not exist.
+ Requested event or None if it does not exist.
Examples:
@@ -207,12 +207,12 @@ async def retrieve_multiple(
"""`Retrieve multiple events by id. `_
Args:
- ids (Sequence[int] | None): IDs
- external_ids (SequenceNotStr[str] | None): External IDs
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ ids: IDs
+ external_ids: External IDs
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Returns:
- EventList: The requested events.
+ The requested events.
Examples:
@@ -242,13 +242,13 @@ async def aggregate_unique_values(
"""`Get unique properties with counts for events. `_
Args:
- filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match.
- property (EventPropertyLike | None): The property name(s) to apply the aggregation on.
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to consider.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the events to count requiring exact match.
+ property: The property name(s) to apply the aggregation on.
+ advanced_filter: The filter to narrow down the events to consider.
+ aggregate_filter: The filter to apply to the resulting buckets.
Returns:
- UniqueResultList: List of unique values of events matching the specified filters and search.
+ List of unique values of events matching the specified filters and search.
Examples:
@@ -301,13 +301,12 @@ async def aggregate_count(
"""`Count of event matching the specified filters. `_
Args:
- property (EventPropertyLike | None): If specified, Get an approximate number of Events with a specific property
- (property is not null) and matching the filters.
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count.
- filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match.
+ property: If specified, Get an approximate number of Events with a specific property (property is not null) and matching the filters.
+ advanced_filter: The filter to narrow down the events to count.
+ filter: The filter to narrow down the events to count requiring exact match.
Returns:
- int: The number of events matching the specified filters and search.
+ The number of events matching the specified filters and search.
Examples:
@@ -343,12 +342,12 @@ async def aggregate_cardinality_values(
"""`Find approximate property count for events. `_
Args:
- property (EventPropertyLike): The property to count the cardinality of.
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match.
+ property: The property to count the cardinality of.
+ advanced_filter: The filter to narrow down the events to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the events to count requiring exact match.
Returns:
- int: The number of properties matching the specified filter.
+ The number of properties matching the specified filter.
Examples:
@@ -388,13 +387,12 @@ async def aggregate_cardinality_properties(
"""`Find approximate paths count for events. `_
Args:
- path (EventPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"].
- It means to aggregate only metadata properties (aka keys).
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match.
+ path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+ advanced_filter: The filter to narrow down the events to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the events to count requiring exact match.
Returns:
- int: The number of properties matching the specified filters and search.
+ The number of properties matching the specified filters and search.
Examples:
@@ -426,14 +424,13 @@ async def aggregate_unique_properties(
"""`Get unique paths with counts for events. `_
Args:
- path (EventPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"].
- It means to aggregate only metadata properties (aka keys).
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match.
+ path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+ advanced_filter: The filter to narrow down the events to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the events to count requiring exact match.
Returns:
- UniqueResultList: List of unique values of events matching the specified filters and search.
+ List of unique values of events matching the specified filters and search.
Examples:
@@ -465,10 +462,10 @@ async def create(self, event: Event | EventWrite | Sequence[Event] | Sequence[Ev
"""`Create one or more events. `_
Args:
- event (Event | EventWrite | Sequence[Event] | Sequence[EventWrite]): Event or list of events to create.
+ event: Event or list of events to create.
Returns:
- Event | EventList: Created event(s)
+ Created event(s)
Examples:
@@ -494,9 +491,9 @@ async def delete(
"""`Delete one or more events `_
Args:
- id (int | Sequence[int] | None): Id or list of ids
- external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ id: Id or list of ids
+ external_id: External ID or list of external ids
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Examples:
@@ -535,11 +532,11 @@ async def update(
"""`Update one or more events `_
Args:
- item (Event | EventWrite | EventUpdate | Sequence[Event | EventWrite | EventUpdate]): Event(s) to update
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Event or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: Event(s) to update
+ mode: How to update data when a non-update object is given (Event or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- Event | EventList: Updated event(s)
+ Updated event(s)
Examples:
@@ -572,12 +569,12 @@ async def search(
Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required.
Args:
- description (str | None): Fuzzy match on description.
- filter (EventFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields.
- limit (int): Maximum number of results to return.
+ description: Fuzzy match on description.
+ filter: Filter to apply. Performs exact match on these fields.
+ limit: Maximum number of results to return.
Returns:
- EventList: List of requested events
+ List of requested events
Examples:
@@ -610,11 +607,11 @@ async def upsert(
For more details, see :ref:`appendix-upsert`.
Args:
- item (Event | EventWrite | Sequence[Event | EventWrite]): Event or list of events to upsert.
- mode (Literal['patch', 'replace']): Whether to patch or replace in the case the events are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
+ item: Event or list of events to upsert.
+ mode: Whether to patch or replace in the case the events are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
Returns:
- Event | EventList: The upserted event(s).
+ The upserted event(s).
Examples:
@@ -667,29 +664,29 @@ async def list(
"""`List events `_
Args:
- start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps.
- end_time (dict[str, Any] | EndTimeFilter | None): Range between two timestamps.
- active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified.
- type (str | None): Type of the event, e.g 'failure'.
- subtype (str | None): Subtype of the event, e.g 'electrical'.
- metadata (dict[str, str] | None): Customizable extra data about the event. String key -> String value.
- asset_ids (Sequence[int] | None): Asset IDs of related equipments that this event relates to.
- asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this event relates to.
- asset_subtree_ids (int | Sequence[int] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- data_set_ids (int | Sequence[int] | None): Return only events in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only events in the specified data set(s) with this external id / these external ids.
- source (str | None): The source of this event.
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- external_id_prefix (str | None): External Id provided by client. Should be unique within the project.
- sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
- partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
- limit (int | None): Maximum number of events to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
- advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage.
+ start_time: Range between two timestamps.
+ end_time: Range between two timestamps.
+ active_at_time: Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified.
+ type: Type of the event, e.g 'failure'.
+ subtype: Subtype of the event, e.g 'electrical'.
+ metadata: Customizable extra data about the event. String key -> String value.
+ asset_ids: Asset IDs of related equipment that this event relates to.
+ asset_external_ids: Asset External IDs of related equipment that this event relates to.
+ asset_subtree_ids: Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids: Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids: Return only events in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only events in the specified data set(s) with this external id / these external ids.
+ source: The source of this event.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ external_id_prefix: External Id provided by client. Should be unique within the project.
+ sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+ partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
+ limit: Maximum number of events to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage.
Returns:
- EventList: List of requested events
+ List of requested events
.. note::
When using `partitions`, there are few considerations to keep in mind:
diff --git a/cognite/client/_api/extractionpipelines/__init__.py b/cognite/client/_api/extractionpipelines/__init__.py
index 24ea0ab9f8..12213f6971 100644
--- a/cognite/client/_api/extractionpipelines/__init__.py
+++ b/cognite/client/_api/extractionpipelines/__init__.py
@@ -48,11 +48,11 @@ async def __call__(
"""Iterate over extraction pipelines
Args:
- chunk_size (int | None): Number of extraction pipelines to yield per chunk. Defaults to yielding extraction pipelines one by one.
- limit (int | None): Limits the number of results to be returned. Defaults to yielding all extraction pipelines.
+ chunk_size: Number of extraction pipelines to yield per chunk. Defaults to yielding extraction pipelines one by one.
+ limit: Limits the number of results to be returned. Defaults to yielding all extraction pipelines.
Yields:
- ExtractionPipeline | ExtractionPipelineList: Yields extraction pipelines one by one or in chunks up to the chunk size.
+ Yields extraction pipelines one by one or in chunks up to the chunk size.
""" # noqa: DOC404
async for item in self._list_generator(
method="GET",
@@ -67,11 +67,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None)
"""`Retrieve a single extraction pipeline by id. `_
Args:
- id (int | None): ID
- external_id (str | None): External ID
+ id: ID
+ external_id: External ID
Returns:
- ExtractionPipeline | None: Requested extraction pipeline or None if it does not exist.
+ Requested extraction pipeline or None if it does not exist.
Examples:
@@ -101,12 +101,12 @@ async def retrieve_multiple(
"""`Retrieve multiple extraction pipelines by ids and external ids. `_
Args:
- ids (Sequence[int] | None): IDs
- external_ids (SequenceNotStr[str] | None): External IDs
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ ids: IDs
+ external_ids: External IDs
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Returns:
- ExtractionPipelineList: The requested ExtractionPipelines.
+ The requested ExtractionPipelines.
Examples:
@@ -133,10 +133,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> ExtractionPipeli
"""`List extraction pipelines `_
Args:
- limit (int | None): Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ limit: Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- ExtractionPipelineList: List of requested ExtractionPipelines
+ List of requested ExtractionPipelines
Examples:
@@ -172,10 +172,10 @@ async def create(
You can create an arbitrary number of extraction pipelines, and the SDK will split the request into multiple requests if necessary.
Args:
- extraction_pipeline (ExtractionPipeline | ExtractionPipelineWrite | Sequence[ExtractionPipeline] | Sequence[ExtractionPipelineWrite]): Extraction pipeline or list of extraction pipelines to create.
+ extraction_pipeline: Extraction pipeline or list of extraction pipelines to create.
Returns:
- ExtractionPipeline | ExtractionPipelineList: Created extraction pipeline(s)
+ Created extraction pipeline(s)
Examples:
@@ -203,8 +203,8 @@ async def delete(
"""`Delete one or more extraction pipelines `_
Args:
- id (int | Sequence[int] | None): Id or list of ids
- external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
+ id: Id or list of ids
+ external_id: External ID or list of external ids
Examples:
@@ -240,11 +240,11 @@ async def update(
"""`Update one or more extraction pipelines `_
Args:
- item (ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate | Sequence[ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate]): Extraction pipeline(s) to update
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ExtractionPipeline or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: Extraction pipeline(s) to update
+ mode: How to update data when a non-update object is given (ExtractionPipeline or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- ExtractionPipeline | ExtractionPipelineList: Updated extraction pipeline(s)
+ Updated extraction pipeline(s)
Examples:
diff --git a/cognite/client/_api/extractionpipelines/configs.py b/cognite/client/_api/extractionpipelines/configs.py
index f2c0c24875..bd79d0e278 100644
--- a/cognite/client/_api/extractionpipelines/configs.py
+++ b/cognite/client/_api/extractionpipelines/configs.py
@@ -20,12 +20,12 @@ async def retrieve(
By default the latest configuration revision is retrieved, or you can specify a timestamp or a revision number.
Args:
- external_id (str): External id of the extraction pipeline to retrieve config from.
- revision (int | None): Optionally specify a revision number to retrieve.
- active_at_time (int | None): Optionally specify a timestamp the configuration revision should be active.
+ external_id: External id of the extraction pipeline to retrieve config from.
+ revision: Optionally specify a revision number to retrieve.
+ active_at_time: Optionally specify a timestamp the configuration revision should be active.
Returns:
- ExtractionPipelineConfig: Retrieved extraction pipeline configuration revision
+ Retrieved extraction pipeline configuration revision
Examples:
@@ -47,10 +47,10 @@ async def list(self, external_id: str) -> ExtractionPipelineConfigRevisionList:
"""`Retrieve all configuration revisions from an extraction pipeline `
Args:
- external_id (str): External id of the extraction pipeline to retrieve config from.
+ external_id: External id of the extraction pipeline to retrieve config from.
Returns:
- ExtractionPipelineConfigRevisionList: Retrieved extraction pipeline configuration revisions
+ Retrieved extraction pipeline configuration revisions
Examples:
@@ -74,10 +74,10 @@ async def create(
"""`Create a new configuration revision `
Args:
- config (ExtractionPipelineConfig | ExtractionPipelineConfigWrite): Configuration revision to create.
+ config: Configuration revision to create.
Returns:
- ExtractionPipelineConfig: Created extraction pipeline configuration revision
+ Created extraction pipeline configuration revision
Examples:
@@ -99,11 +99,11 @@ async def revert(self, external_id: str, revision: int) -> ExtractionPipelineCon
"""`Revert to a previous configuration revision `
Args:
- external_id (str): External id of the extraction pipeline to revert revision for.
- revision (int): Revision to revert to.
+ external_id: External id of the extraction pipeline to revert revision for.
+ revision: Revision to revert to.
Returns:
- ExtractionPipelineConfig: New latest extraction pipeline configuration revision.
+ New latest extraction pipeline configuration revision.
Examples:
diff --git a/cognite/client/_api/extractionpipelines/runs.py b/cognite/client/_api/extractionpipelines/runs.py
index a4e2efcfa7..12e6e56af1 100644
--- a/cognite/client/_api/extractionpipelines/runs.py
+++ b/cognite/client/_api/extractionpipelines/runs.py
@@ -37,15 +37,14 @@ async def list(
"""`List runs for an extraction pipeline with given external_id `_
Args:
- external_id (str): Extraction pipeline external Id.
- statuses (RunStatus | Sequence[RunStatus] | SequenceNotStr[str] | None): One or more among "success" / "failure" / "seen".
- message_substring (str | None): Failure message part.
- created_time (dict[str, Any] | TimestampRange | str | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as timestamps in ms.
- If a string is passed, it is assumed to be the minimum value.
- limit (int | None): Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ external_id: Extraction pipeline external Id.
+ statuses: One or more among "success" / "failure" / "seen".
+ message_substring: Failure message part.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as timestamps in ms. If a string is passed, it is assumed to be the minimum value.
+ limit: Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- ExtractionPipelineRunList: List of requested extraction pipeline runs
+ List of requested extraction pipeline runs
Tip:
The ``created_time`` parameter can also be passed as a string, to support the most typical usage pattern
@@ -116,10 +115,10 @@ async def create(
You can create an arbitrary number of extraction pipeline runs, and the SDK will split the request into multiple requests.
Args:
- run (ExtractionPipelineRun | ExtractionPipelineRunWrite | Sequence[ExtractionPipelineRun] | Sequence[ExtractionPipelineRunWrite]): ExtractionPipelineRun| ExtractionPipelineRunWrite | Sequence[ExtractionPipelineRun] | Sequence[ExtractionPipelineRunWrite]): Extraction pipeline or list of extraction pipeline runs to create.
+ run: Extraction pipeline run or list of extraction pipeline runs to create.
Returns:
- ExtractionPipelineRun | ExtractionPipelineRunList: Created extraction pipeline run(s)
+ Created extraction pipeline run(s)
Examples:
diff --git a/cognite/client/_api/files.py b/cognite/client/_api/files.py
index 2d1f853db9..bffc478a5e 100644
--- a/cognite/client/_api/files.py
+++ b/cognite/client/_api/files.py
@@ -120,31 +120,31 @@ async def __call__(
Fetches file metadata objects as they are iterated over, so you keep a limited number of metadata objects in memory.
Args:
- chunk_size (int | None): Number of files to return in each chunk. Defaults to yielding one event a time.
- name (str | None): Name of the file.
- mime_type (str | None): File type. E.g. text/plain, application/pdf, ..
- metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value
- asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs.
- asset_external_ids (SequenceNotStr[str] | None): No description.
- asset_subtree_ids (int | Sequence[int] | None): Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- data_set_ids (int | Sequence[int] | None): Return only files in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only files in the specified data set(s) with this external id / these external ids.
- labels (LabelFilter | None): Return only the files matching the specified label(s).
- geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation.
- source (str | None): The source of this event.
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range.
- source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range.
- uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps
- external_id_prefix (str | None): External Id provided by client. Should be unique within the project.
- directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client.
- uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body.
- limit (int | None): Maximum number of files to return. Defaults to return all items.
+ chunk_size: Number of files to return in each chunk. Defaults to yielding one file at a time.
+ name: Name of the file.
+ mime_type: File type. E.g. text/plain, application/pdf, etc.
+ metadata: Custom, application specific metadata. String key -> String value
+ asset_ids: Only include files that reference these specific asset IDs.
+ asset_external_ids: Only include files that reference these specific asset external IDs.
+ asset_subtree_ids: Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids: Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids: Return only files in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only files in the specified data set(s) with this external id / these external ids.
+ labels: Return only the files matching the specified label(s).
+ geo_location: Only include files matching the specified geographic relation.
+ source: The source of this file.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ source_created_time: Filter for files where the sourceCreatedTime field has been set and is within the specified range.
+ source_modified_time: Filter for files where the sourceModifiedTime field has been set and is within the specified range.
+ uploaded_time: Range between two timestamps.
+ external_id_prefix: External Id provided by client. Should be unique within the project.
+ directory_prefix: Filter by this (case-sensitive) prefix for the directory provided by the client.
+ uploaded: Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body.
+ limit: Maximum number of files to return. Defaults to return all items.
Yields:
- FileMetadata | FileMetadataList: yields FileMetadata one by one if chunk_size is not specified, else FileMetadataList objects.
+ yields FileMetadata one by one if chunk_size is not specified, else FileMetadataList objects.
""" # noqa: DOC404
asset_subtree_ids_processed = process_asset_subtree_ids(asset_subtree_ids, asset_subtree_external_ids)
data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids)
@@ -186,11 +186,11 @@ async def create(
"""Create file without uploading content.
Args:
- file_metadata (FileMetadata | FileMetadataWrite): File metadata for the file to create.
- overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
+ file_metadata: File metadata for the file to create.
+ overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
Returns:
- tuple[FileMetadata, str]: Tuple containing the file metadata and upload url of the created file.
+ Tuple containing the file metadata and upload url of the created file.
Examples:
@@ -223,12 +223,12 @@ async def retrieve(
"""`Retrieve a single file metadata by id. `_
Args:
- id (int | None): ID
- external_id (str | None): External ID
- instance_id (NodeId | None): Instance ID
+ id: ID
+ external_id: External ID
+ instance_id: Instance ID
Returns:
- FileMetadata | None: Requested file metadata or None if it does not exist.
+ Requested file metadata or None if it does not exist.
Examples:
@@ -258,13 +258,13 @@ async def retrieve_multiple(
"""`Retrieve multiple file metadatas by id. `_
Args:
- ids (Sequence[int] | None): IDs
- external_ids (SequenceNotStr[str] | None): External IDs
- instance_ids (Sequence[NodeId] | None): Instance IDs
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ ids: IDs
+ external_ids: External IDs
+ instance_ids: Instance IDs
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Returns:
- FileMetadataList: The requested file metadatas.
+ The requested file metadatas.
Examples:
@@ -291,10 +291,10 @@ async def aggregate_count(self, filter: FileMetadataFilter | dict[str, Any] | No
"""`Aggregate files `_
Args:
- filter (FileMetadataFilter | dict[str, Any] | None): Filter on file metadata filter with exact match
+ filter: Filter on file metadata filter with exact match
Returns:
- int: Count of files matching the filter.
+ Count of files matching the filter.
Examples:
@@ -316,9 +316,9 @@ async def delete(
"""`Delete files `_
Args:
- id (int | Sequence[int] | None): Id or list of ids
- external_id (str | SequenceNotStr[str] | None): str or list of str
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ id: Id or list of ids
+ external_id: str or list of str
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Examples:
@@ -361,11 +361,11 @@ async def update(
Currently, a full replacement of labels on a file is not supported (only partial add/remove updates). See the example below on how to perform partial labels update.
Args:
- item (FileMetadata | FileMetadataWrite | FileMetadataUpdate | Sequence[FileMetadata | FileMetadataWrite | FileMetadataUpdate]): file(s) to update.
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (FilesMetadata or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: file(s) to update.
+ mode: How to update data when a non-update object is given (FilesMetadata or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- FileMetadata | FileMetadataList: The updated files.
+ The updated files.
Examples:
@@ -415,12 +415,12 @@ async def search(
Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required.
Args:
- name (str | None): Prefix and fuzzy search on name.
- filter (FileMetadataFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields.
- limit (int): Max number of results to return.
+ name: Prefix and fuzzy search on name.
+ filter: Filter to apply. Performs exact match on these fields.
+ limit: Max number of results to return.
Returns:
- FileMetadataList: List of requested files metadata.
+ List of requested files metadata.
Examples:
@@ -447,11 +447,11 @@ async def upload_content(
"""`Upload a file content `_
Args:
- path (Path | str): Path to the file you wish to upload.
- external_id (str | None): The external ID provided by the client. Must be unique within the project.
- instance_id (NodeId | None): Instance ID of the file.
+ path: Path to the file you wish to upload.
+ external_id: The external ID provided by the client. Must be unique within the project.
+ instance_id: Instance ID of the file.
Returns:
- FileMetadata: No description.
+ No description.
"""
path = Path(path)
if path.is_file():
@@ -483,25 +483,25 @@ async def upload(
"""`Upload a file `_
Args:
- path (Path | str): Path to the file you wish to upload. If path is a directory, this method will upload all files in that directory.
- external_id (str | None): The external ID provided by the client. Must be unique within the project.
- name (str | None): Name of the file.
- source (str | None): The source of the file.
- mime_type (str | None): File type. E.g. text/plain, application/pdf, ...
- metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value.
- directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path.
- asset_ids (Sequence[int] | None): No description.
- source_created_time (int | None): The timestamp for when the file was originally created in the source system.
- source_modified_time (int | None): The timestamp for when the file was last modified in the source system.
- data_set_id (int | None): ID of the data set.
- labels (Sequence[Label] | None): A list of the labels associated with this resource item.
- geo_location (GeoLocation | None): The geographic metadata of the file.
- security_categories (Sequence[int] | None): Security categories to attach to this file.
- recursive (bool): If path is a directory, upload all contained files recursively.
- overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
+ path: Path to the file you wish to upload. If path is a directory, this method will upload all files in that directory.
+ external_id: The external ID provided by the client. Must be unique within the project.
+ name: Name of the file.
+ source: The source of the file.
+ mime_type: File type. E.g. text/plain, application/pdf, ...
+ metadata: Customizable extra data about the file. String key -> String value.
+ directory: The directory to be associated with this file. Must be an absolute, unix-style path.
+ asset_ids: No description.
+ source_created_time: The timestamp for when the file was originally created in the source system.
+ source_modified_time: The timestamp for when the file was last modified in the source system.
+ data_set_id: ID of the data set.
+ labels: A list of the labels associated with this resource item.
+ geo_location: The geographic metadata of the file.
+ security_categories: Security categories to attach to this file.
+ recursive: If path is a directory, upload all contained files recursively.
+ overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
Returns:
- FileMetadata | FileMetadataList: The file metadata of the uploaded file(s).
+ The file metadata of the uploaded file(s).
Examples:
@@ -594,12 +594,12 @@ async def upload_content_bytes(
Note that the maximum file size is 5GiB. In order to upload larger files use `multipart_upload_content_session`.
Args:
- content (str | bytes | BinaryIO): The content to upload.
- external_id (str | None): The external ID provided by the client. Must be unique within the project.
- instance_id (NodeId | None): Instance ID of the file.
+ content: The content to upload.
+ external_id: The external ID provided by the client. Must be unique within the project.
+ instance_id: Instance ID of the file.
Returns:
- FileMetadata: No description.
+ No description.
Examples:
@@ -693,24 +693,24 @@ async def upload_bytes(
Note that the maximum file size is 5GiB. In order to upload larger files use `multipart_upload_session`.
Args:
- content (str | bytes | BinaryIO | AsyncIterator[bytes]): The content to upload.
- name (str): Name of the file.
- external_id (str | None): The external ID provided by the client. Must be unique within the project.
- source (str | None): The source of the file.
- mime_type (str | None): File type. E.g. text/plain, application/pdf,...
- metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value.
- directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path.
- asset_ids (Sequence[int] | None): No description.
- data_set_id (int | None): Id of the data set.
- labels (Sequence[Label] | None): A list of the labels associated with this resource item.
- geo_location (GeoLocation | None): The geographic metadata of the file.
- source_created_time (int | None): The timestamp for when the file was originally created in the source system.
- source_modified_time (int | None): The timestamp for when the file was last modified in the source system.
- security_categories (Sequence[int] | None): Security categories to attach to this file.
- overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
+ content: The content to upload.
+ name: Name of the file.
+ external_id: The external ID provided by the client. Must be unique within the project.
+ source: The source of the file.
+ mime_type: File type. E.g. text/plain, application/pdf,...
+ metadata: Customizable extra data about the file. String key -> String value.
+ directory: The directory to be associated with this file. Must be an absolute, unix-style path.
+ asset_ids: No description.
+ data_set_id: Id of the data set.
+ labels: A list of the labels associated with this resource item.
+ geo_location: The geographic metadata of the file.
+ source_created_time: The timestamp for when the file was originally created in the source system.
+ source_modified_time: The timestamp for when the file was last modified in the source system.
+ security_categories: Security categories to attach to this file.
+ overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
Returns:
- FileMetadata: The metadata of the uploaded file.
+ The metadata of the uploaded file.
Examples:
@@ -787,24 +787,24 @@ async def multipart_upload_session(
for each part before exiting. It also supports async usage with `async with`, then calling `await upload_part_async`.
Args:
- name (str): Name of the file.
- parts (int): The number of parts to upload, must be between 1 and 250.
- external_id (str | None): The external ID provided by the client. Must be unique within the project.
- source (str | None): The source of the file.
- mime_type (str | None): File type. E.g. text/plain, application/pdf,...
- metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value.
- directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path.
- asset_ids (Sequence[int] | None): No description.
- data_set_id (int | None): Id of the data set.
- labels (Sequence[Label] | None): A list of the labels associated with this resource item.
- geo_location (GeoLocation | None): The geographic metadata of the file.
- source_created_time (int | None): The timestamp for when the file was originally created in the source system.
- source_modified_time (int | None): The timestamp for when the file was last modified in the source system.
- security_categories (Sequence[int] | None): Security categories to attach to this file.
- overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
+ name: Name of the file.
+ parts: The number of parts to upload, must be between 1 and 250.
+ external_id: The external ID provided by the client. Must be unique within the project.
+ source: The source of the file.
+ mime_type: File type. E.g. text/plain, application/pdf,...
+ metadata: Customizable extra data about the file. String key -> String value.
+ directory: The directory to be associated with this file. Must be an absolute, unix-style path.
+ asset_ids: No description.
+ data_set_id: Id of the data set.
+ labels: A list of the labels associated with this resource item.
+ geo_location: The geographic metadata of the file.
+ source_created_time: The timestamp for when the file was originally created in the source system.
+ source_modified_time: The timestamp for when the file was last modified in the source system.
+ security_categories: Security categories to attach to this file.
+ overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings.
Returns:
- FileMultipartUploadSession: Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded.
+ Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded.
Examples:
@@ -878,12 +878,12 @@ async def multipart_upload_content_session(
for each part before exiting. It also supports async usage with `async with`, then calling `await upload_part_async`.
Args:
- parts (int): The number of parts to upload, must be between 1 and 250.
- external_id (str | None): The external ID provided by the client. Must be unique within the project.
- instance_id (NodeId | None): Instance ID of the file.
+ parts: The number of parts to upload, must be between 1 and 250.
+ external_id: The external ID provided by the client. Must be unique within the project.
+ instance_id: Instance ID of the file.
Returns:
- FileMultipartUploadSession: Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded.
+ Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded.
Examples:
@@ -931,8 +931,8 @@ async def _upload_multipart_part(self, upload_url: str, content: str | bytes | B
If `content` does not somehow expose its length, this method may not work on Azure or AWS.
Args:
- upload_url (str): URL to upload file chunk to.
- content (str | bytes | BinaryIO): The content to upload.
+ upload_url: URL to upload file chunk to.
+ content: The content to upload.
"""
headers = {"accept": "*/*"}
file_size, file_content = prepare_content_for_upload(content)
@@ -954,7 +954,7 @@ async def _complete_multipart_upload(self, session: FileMultipartUploadSession)
"""Complete a multipart upload. Once this returns the file can be downloaded.
Args:
- session (FileMultipartUploadSession): Multipart upload session returned from
+ session: Multipart upload session to complete, as returned from the session-creating method (e.g. `multipart_upload_session`).
"""
await self._post(
self._RESOURCE_PATH + "/completemultipartupload",
@@ -972,13 +972,13 @@ async def retrieve_download_urls(
"""Get download links by id or external id
Args:
- id (int | Sequence[int] | None): Id or list of ids.
- external_id (str | SequenceNotStr[str] | None): External id or list of external ids.
- instance_id (NodeId | Sequence[NodeId] | None): Instance id or list of instance ids.
- extended_expiration (bool): Extend expiration time of download url to 1 hour. Defaults to false.
+ id: Id or list of ids.
+ external_id: External id or list of external ids.
+ instance_id: Instance id or list of instance ids.
+ extended_expiration: Extend expiration time of download url to 1 hour. Defaults to false.
Returns:
- dict[int | str | NodeId, str]: Dictionary containing download urls.
+ Dictionary containing download urls.
"""
identifiers = IdentifierSequence.load(ids=id, external_ids=external_id, instance_ids=instance_id)
@@ -1052,13 +1052,12 @@ async def download(
the files missing. A warning is issued when this happens, listing the affected files.
Args:
- directory (str | Path): Directory to download the file(s) to.
- id (int | Sequence[int] | None): Id or list of ids
- external_id (str | SequenceNotStr[str] | None): External ID or list of external ids.
- instance_id (NodeId | Sequence[NodeId] | None): Instance ID or list of instance ids.
- keep_directory_structure (bool): Whether or not to keep the directory hierarchy in CDF,
- creating subdirectories as needed below the given directory.
- resolve_duplicate_file_names (bool): Whether or not to resolve duplicate file names by appending a number on duplicate file names
+ directory: Directory to download the file(s) to.
+ id: Id or list of ids
+ external_id: External ID or list of external ids.
+ instance_id: Instance ID or list of instance ids.
+ keep_directory_structure: Whether or not to keep the directory hierarchy in CDF, creating subdirectories as needed below the given directory.
+ resolve_duplicate_file_names: Whether or not to resolve duplicate file names by appending a number on duplicate file names
Examples:
@@ -1196,10 +1195,10 @@ async def download_to_path(
"""Download a file to a specific target.
Args:
- path (Path | str): Download to this path.
- id (int | None): Id of of the file to download.
- external_id (str | None): External id of the file to download.
- instance_id (NodeId | None): Instance id of the file to download.
+ path: Download to this path.
+ id: Id of the file to download.
+ external_id: External id of the file to download.
+ instance_id: Instance id of the file to download.
Examples:
@@ -1223,9 +1222,9 @@ async def download_bytes(
"""Download a file as bytes.
Args:
- id (int | None): Id of the file
- external_id (str | None): External id of the file
- instance_id (NodeId | None): Instance id of the file
+ id: Id of the file
+ external_id: External id of the file
+ instance_id: Instance id of the file
Examples:
@@ -1237,7 +1236,7 @@ async def download_bytes(
>>> file_content = client.files.download_bytes(id=1)
Returns:
- bytes: The file in binary format
+ The file in binary format
"""
identifier = Identifier.of_either(id, external_id, instance_id).as_dict()
download_link = await self._get_download_link(identifier)
@@ -1281,31 +1280,31 @@ async def list(
"""`List files `_
Args:
- name (str | None): Name of the file.
- mime_type (str | None): File type. E.g. text/plain, application/pdf, ..
- metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value
- asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs.
- asset_external_ids (SequenceNotStr[str] | None): No description.
- asset_subtree_ids (int | Sequence[int] | None): Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- data_set_ids (int | Sequence[int] | None): Return only files in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only files in the specified data set(s) with this external id / these external ids.
- labels (LabelFilter | None): Return only the files matching the specified label filter(s).
- geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation.
- source (str | None): The source of this event.
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range.
- source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range.
- uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps
- external_id_prefix (str | None): External Id provided by client. Should be unique within the project.
- directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client.
- uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body.
- limit (int | None): Max number of files to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
- partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
+ name: Name of the file.
+ mime_type: File type. E.g. text/plain, application/pdf, ...
+ metadata: Custom, application specific metadata. String key -> String value
+ asset_ids: Only include files that reference these specific asset IDs.
+ asset_external_ids: No description.
+ asset_subtree_ids: Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids: Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids: Return only files in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only files in the specified data set(s) with this external id / these external ids.
+ labels: Return only the files matching the specified label filter(s).
+ geo_location: Only include files matching the specified geographic relation.
+ source: The source of this file.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ source_created_time: Filter for files where the sourceCreatedTime field has been set and is within the specified range.
+ source_modified_time: Filter for files where the sourceModifiedTime field has been set and is within the specified range.
+ uploaded_time: Range between two timestamps
+ external_id_prefix: External Id provided by client. Should be unique within the project.
+ directory_prefix: Filter by this (case-sensitive) prefix for the directory provided by the client.
+ uploaded: Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body.
+ limit: Max number of files to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
Returns:
- FileMetadataList: The requested files.
+ The requested files.
Examples:
diff --git a/cognite/client/_api/functions/__init__.py b/cognite/client/_api/functions/__init__.py
index 54b4d9bdf6..9e1c46a9e7 100644
--- a/cognite/client/_api/functions/__init__.py
+++ b/cognite/client/_api/functions/__init__.py
@@ -115,18 +115,18 @@ async def __call__(
"""Iterate over functions.
Args:
- chunk_size (int | None): Number of functions to yield per chunk. Defaults to yielding functions one by one.
- name (str | None): The name of the function.
- owner (str | None): Owner of the function.
- file_id (int | None): The file ID of the zip-file used to create the function.
- status (FunctionStatus | None): Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"].
- external_id_prefix (str | None): External ID prefix to filter on.
- created_time (dict[Literal['min', 'max'], int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- metadata (dict[str, str] | None): No description.
- limit (int | None): Maximum number of functions to return. Defaults to yielding all functions.
+ chunk_size: Number of functions to yield per chunk. Defaults to yielding functions one by one.
+ name: The name of the function.
+ owner: Owner of the function.
+ file_id: The file ID of the zip-file used to create the function.
+ status: Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"].
+ external_id_prefix: External ID prefix to filter on.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ metadata: No description.
+ limit: Maximum number of functions to return. Defaults to yielding all functions.
Yields:
- Function | FunctionList: An iterator over functions.
+ An iterator over functions.
""" # noqa: DOC404
# The _list_generator method is not used as the /list endpoint does not
# respond with a cursor (pagination is not supported)
@@ -185,28 +185,27 @@ async def create(
For help with troubleshooting, please see `this page. `_
Args:
- name (str | FunctionWrite): The name of the function or a FunctionWrite object. If a FunctionWrite
- object is passed, all other arguments are ignored.
- folder (str | None): Path to the folder where the function source code is located.
- file_id (int | None): File ID of the code uploaded to the Files API.
- function_path (str): Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on POSIX path format.
- function_handle (FunctionHandle | None): Reference to a function object, which must be named `handle`.
- external_id (str | None): External id of the function.
- description (str | None): Description of the function.
- owner (str | None): Owner of this function. Typically used to know who created it.
- secrets (dict[str, str] | None): Additional secrets as key/value pairs. These can e.g. password to simulators or other data sources. Keys must be lowercase characters, numbers or dashes (-) and at most 15 characters. You can create at most 30 secrets, all keys must be unique.
- env_vars (dict[str, str] | None): Environment variables as key/value pairs. Keys can contain only letters, numbers or the underscore character. You can create at most 100 environment variables.
- cpu (float | None): Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used.
- memory (float | None): Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used.
- runtime (RunTime | None): The function runtime. Valid values are ["py310", "py311", "py312", `None`], and `None` translates to the API default which will change over time. The runtime "py312" resolves to the latest version of the Python 3.12 series.
- metadata (dict[str, str] | None): Metadata for the function as key/value pairs. Key & values can be at most 32, 512 characters long respectively. You can have at the most 16 key-value pairs, with a maximum size of 512 bytes.
- index_url (str | None): Index URL for Python Package Manager to use. Be aware of the intrinsic security implications of using the `index_url` option. `More information can be found on official docs, `_
- extra_index_urls (list[str] | None): Extra Index URLs for Python Package Manager to use. Be aware of the intrinsic security implications of using the `extra_index_urls` option. `More information can be found on official docs, `_
- skip_folder_validation (bool): When creating a function using the 'folder' argument, pass True to skip the extra validation step that attempts to import the module. Skipping can be useful when your function requires several heavy packages to already be installed locally. Defaults to False.
- data_set_id (int | None): Data set to upload the function code to. Note: Does not affect the function itself.
+ name: The name of the function or a FunctionWrite object. If a FunctionWrite object is passed, all other arguments are ignored.
+ folder: Path to the folder where the function source code is located.
+ file_id: File ID of the code uploaded to the Files API.
+ function_path: Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on POSIX path format.
+ function_handle: Reference to a function object, which must be named `handle`.
+ external_id: External id of the function.
+ description: Description of the function.
+ owner: Owner of this function. Typically used to know who created it.
+ secrets: Additional secrets as key/value pairs. These can e.g. password to simulators or other data sources. Keys must be lowercase characters, numbers or dashes (-) and at most 15 characters. You can create at most 30 secrets, all keys must be unique.
+ env_vars: Environment variables as key/value pairs. Keys can contain only letters, numbers or the underscore character. You can create at most 100 environment variables.
+ cpu: Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used.
+ memory: Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used.
+ runtime: The function runtime. Valid values are ["py310", "py311", "py312", `None`], and `None` translates to the API default which will change over time. The runtime "py312" resolves to the latest version of the Python 3.12 series.
+ metadata: Metadata for the function as key/value pairs. Key & values can be at most 32, 512 characters long respectively. You can have at the most 16 key-value pairs, with a maximum size of 512 bytes.
+ index_url: Index URL for Python Package Manager to use. Be aware of the intrinsic security implications of using the `index_url` option. `More information can be found on official docs, `_
+ extra_index_urls: Extra Index URLs for Python Package Manager to use. Be aware of the intrinsic security implications of using the `extra_index_urls` option. `More information can be found on official docs, `_
+ skip_folder_validation: When creating a function using the 'folder' argument, pass True to skip the extra validation step that attempts to import the module. Skipping can be useful when your function requires several heavy packages to already be installed locally. Defaults to False.
+ data_set_id: Data set to upload the function code to. Note: Does not affect the function itself.
Returns:
- Function: The created function.
+ The created function.
Examples:
@@ -345,8 +344,8 @@ async def delete(
"""`Delete one or more functions. `_
Args:
- id (int | Sequence[int] | None): Id or list of ids.
- external_id (str | SequenceNotStr[str] | None): External ID or list of external ids.
+ id: Id or list of ids.
+ external_id: External ID or list of external ids.
Example:
@@ -376,17 +375,17 @@ async def list(
"""`List all functions. `_
Args:
- name (str | None): The name of the function.
- owner (str | None): Owner of the function.
- file_id (int | None): The file ID of the zip-file used to create the function.
- status (FunctionStatus | None): Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"].
- external_id_prefix (str | None): External ID prefix to filter on.
- created_time (dict[Literal['min', 'max'], int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32, value 512 characters, up to 16 key-value pairs. Maximum size of entire metadata is 4096 bytes.
- limit (int | None): Maximum number of functions to return. Pass in -1, float('inf') or None to list all.
+ name: The name of the function.
+ owner: Owner of the function.
+ file_id: The file ID of the zip-file used to create the function.
+ status: Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"].
+ external_id_prefix: External ID prefix to filter on.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32, value 512 characters, up to 16 key-value pairs. Maximum size of entire metadata is 4096 bytes.
+ limit: Maximum number of functions to return. Pass in -1, float('inf') or None to list all.
Returns:
- FunctionList: List of functions
+ List of functions
Example:
@@ -425,11 +424,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None)
"""`Retrieve a single function by id. `_
Args:
- id (int | None): ID
- external_id (str | None): External ID
+ id: ID
+ external_id: External ID
Returns:
- Function | None: Requested function or None if it does not exist.
+ Requested function or None if it does not exist.
Examples:
@@ -456,12 +455,12 @@ async def retrieve_multiple(
"""`Retrieve multiple functions by id. `_
Args:
- ids (Sequence[int] | None): IDs
- external_ids (SequenceNotStr[str] | None): External IDs
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ ids: IDs
+ external_ids: External IDs
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Returns:
- FunctionList: The requested functions.
+ The requested functions.
Examples:
@@ -496,17 +495,17 @@ async def call(
"""`Call a function by its ID or external ID. `_.
Args:
- id (int | None): ID
- external_id (str | None): External ID
- data (dict[str, object] | None): Input data to the function (JSON serializable). This data is passed deserialized into the function through one of the arguments called data. **WARNING:** Secrets or other confidential information should not be passed via this argument. There is a dedicated `secrets` argument in FunctionsAPI.create() for this purpose.'
- wait (bool): Wait until the function call is finished. Defaults to True.
- nonce (str | None): Nonce retrieved from sessions API when creating a session. This will be used to bind the session before executing the function. If not provided, a new session will be created based on the client credentials.
+ id: ID
+ external_id: External ID
+ data: Input data to the function (JSON serializable). This data is passed deserialized into the function through one of the arguments called data. **WARNING:** Secrets or other confidential information should not be passed via this argument. There is a dedicated `secrets` argument in FunctionsAPI.create() for this purpose.
+ wait: Wait until the function call is finished. Defaults to True.
+ nonce: Nonce retrieved from sessions API when creating a session. This will be used to bind the session before executing the function. If not provided, a new session will be created based on the client credentials.
Tip:
You can create a session via the Sessions API, using the client.iam.session.create() method.
Returns:
- FunctionCall: A function call object.
+ A function call object.
Examples:
@@ -540,7 +539,7 @@ async def limits(self) -> FunctionsLimits:
"""`Get service limits. `_.
Returns:
- FunctionsLimits: A function limits object.
+ A function limits object.
Examples:
@@ -652,7 +651,7 @@ async def activate(self) -> FunctionsStatus:
May take some time to take effect (hours).
Returns:
- FunctionsStatus: A function activation status.
+ A function activation status.
Examples:
@@ -670,7 +669,7 @@ async def status(self) -> FunctionsStatus:
"""`Functions activation status for the Project. `_.
Returns:
- FunctionsStatus: A function activation status.
+ A function activation status.
Examples:
@@ -694,11 +693,10 @@ def get_handle_function_node(file_content: str) -> ast.FunctionDef | ast.Assign
and assignments since Cognite Functions require directly callable entry points.
Args:
- file_content (str): The Python source code as a string
+ file_content: The Python source code as a string
Returns:
- ast.FunctionDef | ast.Assign | ast.AnnAssign | None: The AST node of the last top-level 'handle' function,
- assignment, or None if not found or if the file is not a valid Python file.
+ The AST node of the last top-level 'handle' function or assignment, or None if not found or if the file is not a valid Python file.
"""
try:
tree = ast.parse(file_content)
@@ -824,10 +822,10 @@ def _extract_requirements_from_file(file_name: str) -> list[str]:
"""Extracts a list of library requirements from a file. Comments, lines starting with '#', are ignored.
Args:
- file_name (str): name of the file to parse
+ file_name: name of the file to parse
Returns:
- list[str]: returns a list of library records
+ A list of library records
"""
requirements: list[str] = []
with open(file_name, "r+") as f:
@@ -842,10 +840,10 @@ def _extract_requirements_from_doc_string(docstr: str) -> list[str] | None:
"""Extracts a list of library requirements defined between [requirements] and [/requirements] in a functions docstring.
Args:
- docstr (str): the docstring to extract requirements from
+ docstr: the docstring to extract requirements from
Returns:
- list[str] | None: returns a list of library records if requirements are defined in the docstring, else None
+ A list of library records if requirements are defined in the docstring, else None
"""
substr_start, substr_end = None, None
@@ -867,11 +865,11 @@ def _validate_and_parse_requirements(requirements: list[str]) -> list[str]:
"""Validates the requirement specifications
Args:
- requirements (list[str]): list of requirement specifications
+ requirements: list of requirement specifications
Raises:
ValueError: if validation of requirements fails
Returns:
- list[str]: The parsed requirements
+ The parsed requirements
"""
constructors = local_import("pip._internal.req.constructors")
install_req_from_line = constructors.install_req_from_line
@@ -890,10 +888,10 @@ def _get_fn_docstring_requirements(fn: Callable) -> list[str]:
"""Read requirements from a function docstring, validate them and return.
Args:
- fn (Callable): the function to read requirements from
+ fn: the function to read requirements from
Returns:
- list[str]: A (possibly empty) list of requirements.
+ A (possibly empty) list of requirements.
"""
if docstr := getdoc(fn):
if reqs := _extract_requirements_from_doc_string(docstr):
diff --git a/cognite/client/_api/functions/calls.py b/cognite/client/_api/functions/calls.py
index ea86ac2dc5..30f53c556b 100644
--- a/cognite/client/_api/functions/calls.py
+++ b/cognite/client/_api/functions/calls.py
@@ -30,16 +30,16 @@ async def list(
"""`List all calls associated with a specific function id. `_ Either function_id or function_external_id must be specified.
Args:
- function_id (int | None): ID of the function on which the calls were made.
- function_external_id (str | None): External ID of the function on which the calls were made.
- status (str | None): Status of the call. Possible values ["Running", "Failed", "Completed", "Timeout"].
- schedule_id (int | None): Schedule id from which the call belongs (if any).
- start_time (dict[str, int] | None): Start time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms.
- end_time (dict[str, int] | None): End time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms.
- limit (int | None): Maximum number of function calls to list. Pass in -1, float('inf') or None to list all Function Calls.
+ function_id: ID of the function on which the calls were made.
+ function_external_id: External ID of the function on which the calls were made.
+ status: Status of the call. Possible values ["Running", "Failed", "Completed", "Timeout"].
+ schedule_id: Schedule id from which the call belongs (if any).
+ start_time: Start time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ end_time: End time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ limit: Maximum number of function calls to list. Pass in -1, float('inf') or None to list all Function Calls.
Returns:
- FunctionCallList: List of function calls
+ List of function calls
Examples:
@@ -83,12 +83,12 @@ async def retrieve(
"""`Retrieve a single function call by id. `_
Args:
- call_id (int): ID of the call.
- function_id (int | None): ID of the function on which the call was made.
- function_external_id (str | None): External ID of the function on which the call was made.
+ call_id: ID of the call.
+ function_id: ID of the function on which the call was made.
+ function_external_id: External ID of the function on which the call was made.
Returns:
- FunctionCall | None: Requested function call or None if either call ID or function identifier is not found.
+ Requested function call or None if either call ID or function identifier is not found.
Examples:
@@ -125,12 +125,12 @@ async def get_response(
"""`Retrieve the response from a function call. `_
Args:
- call_id (int): ID of the call.
- function_id (int | None): ID of the function on which the call was made.
- function_external_id (str | None): External ID of the function on which the call was made.
+ call_id: ID of the call.
+ function_id: ID of the function on which the call was made.
+ function_external_id: External ID of the function on which the call was made.
Returns:
- dict[str, object] | None: Response from the function call.
+ Response from the function call.
Examples:
@@ -163,12 +163,12 @@ async def get_logs(
"""`Retrieve logs for function call. `_
Args:
- call_id (int): ID of the call.
- function_id (int | None): ID of the function on which the call was made.
- function_external_id (str | None): External ID of the function on which the call was made.
+ call_id: ID of the call.
+ function_id: ID of the function on which the call was made.
+ function_external_id: External ID of the function on which the call was made.
Returns:
- FunctionCallLog: Log for the function call.
+ Log for the function call.
Examples:
diff --git a/cognite/client/_api/functions/schedules.py b/cognite/client/_api/functions/schedules.py
index 71cd7b9437..a7970259ea 100644
--- a/cognite/client/_api/functions/schedules.py
+++ b/cognite/client/_api/functions/schedules.py
@@ -64,16 +64,16 @@ async def __call__(
"""Iterate over function schedules
Args:
- chunk_size (int | None): The number of schedules to return in each chunk. Defaults to yielding one schedule a time.
- name (str | None): Name of the function schedule.
- function_id (int | None): ID of the function the schedules are linked to.
- function_external_id (str | None): External ID of the function the schedules are linked to.
- created_time (dict[str, int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- cron_expression (str | None): Cron expression.
- limit (int | None): Maximum schedules to return. Defaults to return all schedules.
+ chunk_size: The number of schedules to return in each chunk. Defaults to yielding one schedule at a time.
+ name: Name of the function schedule.
+ function_id: ID of the function the schedules are linked to.
+ function_external_id: External ID of the function the schedules are linked to.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ cron_expression: Cron expression.
+ limit: Maximum schedules to return. Defaults to returning all schedules.
Yields:
- FunctionSchedule | FunctionSchedulesList: Function schedules.
+ Function schedules.
""" # noqa: DOC404
_ensure_at_most_one_id_given(function_id, function_external_id)
@@ -104,11 +104,11 @@ async def retrieve(
"""`Retrieve a single function schedule by id. `_
Args:
- id (int | Sequence[int]): Schedule ID
- ignore_unknown_ids (bool): Ignore IDs that are not found rather than throw an exception.
+ id: Schedule ID
+ ignore_unknown_ids: Ignore IDs that are not found rather than throw an exception.
Returns:
- FunctionSchedule | None | FunctionSchedulesList: Requested function schedule or None if not found.
+ Requested function schedule or None if not found.
Examples:
@@ -140,15 +140,15 @@ async def list(
"""`List all schedules associated with a specific project. `_
Args:
- name (str | None): Name of the function schedule.
- function_id (int | None): ID of the function the schedules are linked to.
- function_external_id (str | None): External ID of the function the schedules are linked to.
- created_time (dict[str, int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- cron_expression (str | None): Cron expression.
- limit (int | None): Maximum number of schedules to list. Pass in -1, float('inf') or None to list all.
+ name: Name of the function schedule.
+ function_id: ID of the function the schedules are linked to.
+ function_external_id: External ID of the function the schedules are linked to.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ cron_expression: Cron expression.
+ limit: Maximum number of schedules to list. Pass in -1, float('inf') or None to list all.
Returns:
- FunctionSchedulesList: List of function schedules
+ List of function schedules
Examples:
@@ -198,18 +198,16 @@ async def create(
"""`Create a schedule associated with a specific project. `_
Args:
- name (str | FunctionScheduleWrite): Name of the schedule or FunctionSchedule object. If a function schedule object is passed, the other arguments are ignored except for the client_credentials argument.
- cron_expression (str | None): Cron expression.
- function_id (int | None): Id of the function to attach the schedule to.
- function_external_id (str | None): (DEPRECATED) External id of the function to attach the schedule to.
- Note: Will be automatically converted to (internal) ID, as schedules must be bound to an ID.
- client_credentials (dict[str, str] | ClientCredentials | None): Instance of ClientCredentials
- or a dictionary containing client credentials: 'client_id' and 'client_secret'.
- description (str | None): Description of the schedule.
- data (dict[str, object] | None): Data to be passed to the scheduled run.
+ name: Name of the schedule or FunctionSchedule object. If a function schedule object is passed, the other arguments are ignored except for the client_credentials argument.
+ cron_expression: Cron expression.
+ function_id: Id of the function to attach the schedule to.
+ function_external_id: (DEPRECATED) External id of the function to attach the schedule to. Note: Will be automatically converted to (internal) ID, as schedules must be bound to an ID.
+ client_credentials: Instance of ClientCredentials or a dictionary containing client credentials: 'client_id' and 'client_secret'.
+ description: Description of the schedule.
+ data: Data to be passed to the scheduled run.
Returns:
- FunctionSchedule: Created function schedule.
+ Created function schedule.
Note:
There are several ways to authenticate the function schedule — the order of priority is as follows:
@@ -315,7 +313,7 @@ async def delete(self, id: int) -> None:
"""`Delete a schedule associated with a specific project. `_
Args:
- id (int): Id of the schedule
+ id: Id of the schedule
Examples:
@@ -334,10 +332,10 @@ async def get_input_data(self, id: int) -> dict[str, object] | None:
"""`Retrieve the input data to the associated function. `_
Args:
- id (int): Id of the schedule
+ id: Id of the schedule
Returns:
- dict[str, object] | None: Input data to the associated function or None if not set. This data is passed deserialized into the function through the data argument.
+ Input data to the associated function or None if not set. This data is passed deserialized into the function through the data argument.
Examples:
diff --git a/cognite/client/_api/geospatial.py b/cognite/client/_api/geospatial.py
index b3ea42756a..784e0b909b 100644
--- a/cognite/client/_api/geospatial.py
+++ b/cognite/client/_api/geospatial.py
@@ -64,10 +64,10 @@ async def create_feature_types(
Args:
- feature_type (FeatureType | FeatureTypeWrite | Sequence[FeatureType] | Sequence[FeatureTypeWrite]): feature type definition or list of feature type definitions to create.
+ feature_type: feature type definition or list of feature type definitions to create.
Returns:
- FeatureType | FeatureTypeList: Created feature type definition(s)
+ Created feature type definition(s)
Examples:
@@ -100,8 +100,8 @@ async def delete_feature_types(self, external_id: str | SequenceNotStr[str], rec
Args:
- external_id (str | SequenceNotStr[str]): External ID or list of external ids
- recursive (bool): if `true` the features will also be dropped
+ external_id: External ID or list of external ids
+ recursive: if `true` the features will also be dropped
Examples:
@@ -125,7 +125,7 @@ async def list_feature_types(self) -> FeatureTypeList:
Returns:
- FeatureTypeList: List of feature types
+ List of feature types
Examples:
@@ -155,10 +155,10 @@ async def retrieve_feature_types(self, external_id: str | list[str]) -> FeatureT
Args:
- external_id (str | list[str]): External ID
+ external_id: External ID
Returns:
- FeatureType | FeatureTypeList: Requested Type or None if it does not exist.
+ Requested Type or None if it does not exist.
Examples:
@@ -182,10 +182,10 @@ async def patch_feature_types(self, patch: FeatureTypePatch | Sequence[FeatureTy
Args:
- patch (FeatureTypePatch | Sequence[FeatureTypePatch]): the patch to apply
+ patch: the patch to apply
Returns:
- FeatureTypeList: The patched feature types.
+ The patched feature types.
Examples:
@@ -263,13 +263,13 @@ async def create_features(
Args:
- feature_type_external_id (str): Feature type definition for the features to create.
- feature (Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite] | FeatureList | FeatureWriteList): one feature or a list of features to create or a FeatureList object
- allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
- chunk_size (int | None): maximum number of items in a single request to the api
+ feature_type_external_id: Feature type definition for the features to create.
+ feature: one feature or a list of features to create or a FeatureList object
+ allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
+ chunk_size: maximum number of items in a single request to the api
Returns:
- Feature | FeatureList: Created features
+ Created features
Examples:
@@ -322,8 +322,8 @@ async def delete_features(
Args:
- feature_type_external_id (str): No description.
- external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
+ feature_type_external_id: No description.
+ external_id: External ID or list of external ids
Examples:
@@ -368,12 +368,12 @@ async def retrieve_features(
Args:
- feature_type_external_id (str): No description.
- external_id (str | list[str]): External ID or list of external ids
- properties (dict[str, Any] | None): the output property selection
+ feature_type_external_id: No description.
+ external_id: External ID or list of external ids
+ properties: the output property selection
Returns:
- FeatureList | Feature: Requested features or None if it does not exist.
+ Requested features or None if it does not exist.
Examples:
@@ -426,13 +426,13 @@ async def update_features(
Args:
- feature_type_external_id (str): No description.
- feature (Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite]): feature or list of features.
- allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
- chunk_size (int | None): maximum number of items in a single request to the api
+ feature_type_external_id: No description.
+ feature: feature or list of features.
+ allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
+ chunk_size: maximum number of items in a single request to the api
Returns:
- Feature | FeatureList: Updated features
+ Updated features
Examples:
@@ -481,14 +481,14 @@ async def list_features(
This method allows to filter all features.
Args:
- feature_type_external_id (str): the feature type to list features for
- filter (dict[str, Any] | None): the list filter
- properties (dict[str, Any] | None): the output property selection
- limit (int | None): Maximum number of features to return. Defaults to 25. Set to -1, float("inf") or None to return all features.
- allow_crs_transformation (bool): If true, then input geometries if existing in the filter will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
+ feature_type_external_id: the feature type to list features for
+ filter: the list filter
+ properties: the output property selection
+ limit: Maximum number of features to return. Defaults to 25. Set to -1, float("inf") or None to return all features.
+ allow_crs_transformation: If true, then input geometries if existing in the filter will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
Returns:
- FeatureList: The filtered features
+ The filtered features
Examples:
@@ -564,16 +564,16 @@ async def search_features(
If you need to return more than 1000 items, use the `stream_features(...)` method instead.
Args:
- feature_type_external_id (str): The feature type to search for
- filter (dict[str, Any] | None): The search filter
- properties (dict[str, Any] | None): The output property selection
- limit (int): Maximum number of results
- order_by (Sequence[OrderSpec] | None): The order specification
- allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
- allow_dimensionality_mismatch (bool): Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False.
+ feature_type_external_id: The feature type to search for
+ filter: The search filter
+ properties: The output property selection
+ limit: Maximum number of results
+ order_by: The order specification
+ allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
+ allow_dimensionality_mismatch: Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False.
Returns:
- FeatureList: the filtered features
+ the filtered features
Examples:
@@ -692,14 +692,14 @@ async def stream_features(
If you need to order the results, use the `search_features(...)` method instead.
Args:
- feature_type_external_id (str): the feature type to search for
- filter (dict[str, Any] | None): the search filter
- properties (dict[str, Any] | None): the output property selection
- allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
- allow_dimensionality_mismatch (bool): Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False.
+ feature_type_external_id: the feature type to search for
+ filter: the search filter
+ properties: the output property selection
+ allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception.
+ allow_dimensionality_mismatch: Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False.
Yields:
- Feature: a generator for the filtered features
+ a generator for the filtered features
Examples:
@@ -753,14 +753,14 @@ async def aggregate_features(
Args:
- feature_type_external_id (str): the feature type to filter features from
- filter (dict[str, Any] | None): the search filter
- group_by (SequenceNotStr[str] | None): list of properties to group by with
- order_by (Sequence[OrderSpec] | None): the order specification
- output (dict[str, Any] | None): the aggregate output
+ feature_type_external_id: the feature type to filter features from
+ filter: the search filter
+ group_by: list of properties to group by with
+ order_by: the order specification
+ output: the aggregate output
Returns:
- FeatureAggregateList: the filtered features
+ the filtered features
Examples:
@@ -804,10 +804,10 @@ async def get_coordinate_reference_systems(self, srids: int | Sequence[int]) ->
Args:
- srids (int | Sequence[int]): (Union[int, Sequence[int]]): SRID or list of SRIDs
+ srids: SRID or list of SRIDs
Returns:
- CoordinateReferenceSystemList: Requested CRSs.
+ Requested CRSs.
Examples:
@@ -835,10 +835,10 @@ async def list_coordinate_reference_systems(self, only_custom: bool = False) ->
Args:
- only_custom (bool): list only custom CRSs or not
+ only_custom: list only custom CRSs or not
Returns:
- CoordinateReferenceSystemList: list of CRSs.
+ list of CRSs.
Examples:
@@ -867,10 +867,10 @@ async def create_coordinate_reference_systems(
Args:
- crs (CoordinateReferenceSystem | CoordinateReferenceSystemWrite | Sequence[CoordinateReferenceSystem] | Sequence[CoordinateReferenceSystemWrite]): a CoordinateReferenceSystem or a list of CoordinateReferenceSystem
+ crs: a CoordinateReferenceSystem or a list of CoordinateReferenceSystem
Returns:
- CoordinateReferenceSystemList: list of CRSs.
+ list of CRSs.
Examples:
@@ -934,7 +934,7 @@ async def delete_coordinate_reference_systems(self, srids: int | Sequence[int])
Args:
- srids (int | Sequence[int]): (Union[int, Sequence[int]]): SRID or list of SRIDs
+ srids: SRID or list of SRIDs
Examples:
@@ -971,18 +971,18 @@ async def put_raster(
"""`Put raster `
Args:
- feature_type_external_id (str): No description.
- feature_external_id (str): one feature or a list of features to create
- raster_property_name (str): the raster property name
- raster_format (str): the raster input format
- raster_srid (int): the associated SRID for the raster
- file (str | Path): the path to the file of the raster
- allow_crs_transformation (bool): When the parameter is false, requests with rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code.
- raster_scale_x (float | None): the X component of the pixel width in units of coordinate reference system
- raster_scale_y (float | None): the Y component of the pixel height in units of coordinate reference system
+ feature_type_external_id: No description.
+ feature_external_id: the external id of the feature
+ raster_property_name: the raster property name
+ raster_format: the raster input format
+ raster_srid: the associated SRID for the raster
+ file: the path to the file of the raster
+ allow_crs_transformation: When the parameter is false, requests with rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code.
+ raster_scale_x: the X component of the pixel width in units of coordinate reference system
+ raster_scale_y: the Y component of the pixel height in units of coordinate reference system
Returns:
- RasterMetadata: the raster metadata if it was ingested successfully
+ the raster metadata if it was ingested successfully
Examples:
@@ -1027,9 +1027,9 @@ async def delete_raster(
"""`Delete raster `
Args:
- feature_type_external_id (str): No description.
- feature_external_id (str): one feature or a list of features to create
- raster_property_name (str): the raster property name
+ feature_type_external_id: No description.
+ feature_external_id: the external id of the feature
+ raster_property_name: the raster property name
Examples:
@@ -1063,18 +1063,18 @@ async def get_raster(
"""`Get raster `
Args:
- feature_type_external_id (str): Feature type definition for the features to create.
- feature_external_id (str): one feature or a list of features to create
- raster_property_name (str): the raster property name
- raster_format (str): the raster output format
- raster_options (dict[str, Any] | None): GDAL raster creation key-value options
- raster_srid (int | None): the SRID for the output raster
- raster_scale_x (float | None): the X component of the output pixel width in units of coordinate reference system
- raster_scale_y (float | None): the Y component of the output pixel height in units of coordinate reference system
- allow_crs_transformation (bool): When the parameter is false, requests with output rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code.
+ feature_type_external_id: Feature type definition for the features to create.
+ feature_external_id: one feature or a list of features to create
+ raster_property_name: the raster property name
+ raster_format: the raster output format
+ raster_options: GDAL raster creation key-value options
+ raster_srid: the SRID for the output raster
+ raster_scale_x: the X component of the output pixel width in units of coordinate reference system
+ raster_scale_y: the Y component of the output pixel height in units of coordinate reference system
+ allow_crs_transformation: When the parameter is false, requests with output rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code.
Returns:
- bytes: the raster data
+ the raster data
Examples:
@@ -1111,10 +1111,10 @@ async def compute(
"""`Compute `
Args:
- output (dict[str, GeospatialComputeFunction]): No description.
+ output: No description.
Returns:
- GeospatialComputedResponse: Mapping of keys to computed items.
+ Mapping of keys to computed items.
Examples:
diff --git a/cognite/client/_api/hosted_extractors/destinations.py b/cognite/client/_api/hosted_extractors/destinations.py
index 90ad499e61..238d422ca2 100644
--- a/cognite/client/_api/hosted_extractors/destinations.py
+++ b/cognite/client/_api/hosted_extractors/destinations.py
@@ -47,11 +47,11 @@ async def __call__(
Fetches Destination as they are iterated over, so you keep a limited number of destinations in memory.
Args:
- chunk_size (int | None): Number of Destinations to return in each chunk. Defaults to yielding one Destination a time.
- limit (int | None): Maximum number of Destination to return. Defaults to returning all items.
+ chunk_size: Number of Destinations to return in each chunk. Defaults to yielding one Destination a time.
+ limit: Maximum number of Destination to return. Defaults to returning all items.
Yields:
- Destination | DestinationList: yields Destination one by one if chunk_size is not specified, else DestinationList objects.
+ yields Destination one by one if chunk_size is not specified, else DestinationList objects.
""" # noqa: DOC404
self._warning.warn()
@@ -79,12 +79,11 @@ async def retrieve(
"""`Retrieve one or more destinations. `_
Args:
- external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
- ignore_unknown_ids (bool): Ignore external IDs that are not found
-
+ external_ids: The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids: Ignore external IDs that are not found
Returns:
- Destination | DestinationList: Requested destinations
+ Requested destinations
Examples:
@@ -113,9 +112,9 @@ async def delete(
"""`Delete one or more destsinations `_
Args:
- external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
- ignore_unknown_ids (bool): Ignore external IDs that are not found
- force (bool): Delete any jobs associated with each item.
+ external_ids: The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids: Ignore external IDs that are not found
+ force: Delete any jobs associated with each item.
Examples:
@@ -151,10 +150,10 @@ async def create(self, items: DestinationWrite | Sequence[DestinationWrite]) ->
"""`Create one or more destinations. `_
Args:
- items (DestinationWrite | Sequence[DestinationWrite]): Destination(s) to create.
+ items: Destination(s) to create.
Returns:
- Destination | DestinationList: Created destination(s)
+ Created destination(s)
Examples:
@@ -198,11 +197,11 @@ async def update(
"""`Update one or more destinations. `_
Args:
- items (DestinationWrite | DestinationUpdate | Sequence[DestinationWrite | DestinationUpdate]): Destination(s) to update.
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DestinationWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ items: Destination(s) to update.
+ mode: How to update data when a non-update object is given (DestinationWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- Destination | DestinationList: Updated destination(s)
+ Updated destination(s)
Examples:
@@ -232,10 +231,10 @@ async def list(
"""`List destinations `_
Args:
- limit (int | None): Maximum number of destinations to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ limit: Maximum number of destinations to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- DestinationList: List of requested destinations
+ List of requested destinations
Examples:
diff --git a/cognite/client/_api/hosted_extractors/jobs.py b/cognite/client/_api/hosted_extractors/jobs.py
index a21842d5e3..f2d7087170 100644
--- a/cognite/client/_api/hosted_extractors/jobs.py
+++ b/cognite/client/_api/hosted_extractors/jobs.py
@@ -51,11 +51,11 @@ async def __call__(
Fetches jobs as they are iterated over, so you keep a limited number of jobs in memory.
Args:
- chunk_size (int | None): Number of jobs to return in each chunk. Defaults to yielding one job a time.
- limit (int | None): Maximum number of jobs to return. Defaults to returning all items.
+ chunk_size: Number of jobs to return in each chunk. Defaults to yielding one job a time.
+ limit: Maximum number of jobs to return. Defaults to returning all items.
Yields:
- Job | JobList: yields Job one by one if chunk_size is not specified, else JobList objects.
+ yields Job one by one if chunk_size is not specified, else JobList objects.
""" # noqa: DOC404
self._warning.warn()
async for item in self._list_generator(
@@ -80,11 +80,11 @@ async def retrieve(
"""`Retrieve one or more jobs. `_
Args:
- external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the job type.
- ignore_unknown_ids (bool): Ignore external IDs that are not found
+ external_ids: The external ID provided by the client. Must be unique for the job type.
+ ignore_unknown_ids: Ignore external IDs that are not found
Returns:
- Job | None | JobList: Requested jobs
+ Requested jobs
Examples:
@@ -115,8 +115,8 @@ async def delete(
"""`Delete one or more jobs `_
Args:
- external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
- ignore_unknown_ids (bool): Ignore external IDs that are not found
+ external_ids: The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids: Ignore external IDs that are not found
Examples:
Delete jobs by external id:
@@ -149,10 +149,10 @@ async def create(self, items: JobWrite | Sequence[JobWrite]) -> Job | JobList:
"""`Create one or more jobs. `_
Args:
- items (JobWrite | Sequence[JobWrite]): Job(s) to create.
+ items: Job(s) to create.
Returns:
- Job | JobList: Created job(s)
+ Created job(s)
Examples:
@@ -196,11 +196,11 @@ async def update(
"""`Update one or more jobs. `_
Args:
- items (JobWrite | JobUpdate | Sequence[JobWrite | JobUpdate]): Job(s) to update.
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (JobWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ items: Job(s) to update.
+ mode: How to update data when a non-update object is given (JobWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- Job | JobList: Updated job(s)
+ Updated job(s)
Examples:
@@ -230,10 +230,10 @@ async def list(
"""`List jobs `_
Args:
- limit (int | None): Maximum number of jobs to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ limit: Maximum number of jobs to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- JobList: List of requested jobs
+ List of requested jobs
Examples:
@@ -273,13 +273,13 @@ async def list_logs(
"""`List job logs. `_
Args:
- job (str | None): Require returned logs to belong to the job given by this external ID.
- source (str | None): Require returned logs to belong to the any job with source given by this external ID.
- destination (str | None): Require returned logs to belong to the any job with destination given by this external ID.
- limit (int | None): Maximum number of logs to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ job: Require returned logs to belong to the job given by this external ID.
+ source: Require returned logs to belong to any job with source given by this external ID.
+ destination: Require returned logs to belong to any job with destination given by this external ID.
+ limit: Maximum number of logs to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- JobLogsList: List of requested job logs
+ List of requested job logs
Examples:
@@ -319,13 +319,13 @@ async def list_metrics(
"""`List job metrics. `_
Args:
- job (str | None): Require returned metrics to belong to the job given by this external ID.
- source (str | None): Require returned metrics to belong to the any job with source given by this external ID.
- destination (str | None): Require returned metrics to belong to the any job with destination given by this external ID.
- limit (int | None): Maximum number of metrics to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ job: Require returned metrics to belong to the job given by this external ID.
+ source: Require returned metrics to belong to any job with source given by this external ID.
+ destination: Require returned metrics to belong to any job with destination given by this external ID.
+ limit: Maximum number of metrics to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- JobMetricsList: List of requested job metrics
+ List of requested job metrics
Examples:
diff --git a/cognite/client/_api/hosted_extractors/mappings.py b/cognite/client/_api/hosted_extractors/mappings.py
index 1d513b0af5..bd03895577 100644
--- a/cognite/client/_api/hosted_extractors/mappings.py
+++ b/cognite/client/_api/hosted_extractors/mappings.py
@@ -42,11 +42,11 @@ async def __call__(
Fetches Mapping as they are iterated over, so you keep a limited number of mappings in memory.
Args:
- chunk_size (int | None): Number of Mappings to return in each chunk. Defaults to yielding one mapping at a time.
- limit (int | None): Maximum number of mappings to return. Defaults to returning all items.
+ chunk_size: Number of Mappings to return in each chunk. Defaults to yielding one mapping at a time.
+ limit: Maximum number of mappings to return. Defaults to returning all items.
Yields:
- Mapping | MappingList: yields Mapping one by one if chunk_size is not specified, else MappingList objects.
+ yields Mapping one by one if chunk_size is not specified, else MappingList objects.
""" # noqa: DOC404
self._warning.warn()
@@ -72,12 +72,11 @@ async def retrieve(
"""`Retrieve one or more mappings. `_
Args:
- external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
- ignore_unknown_ids (bool): Ignore external IDs that are not found
-
+ external_ids: The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids: Ignore external IDs that are not found
Returns:
- Mapping | MappingList: Requested mappings
+ Requested mappings
Examples:
@@ -106,9 +105,9 @@ async def delete(
"""`Delete one or more mappings `_
Args:
- external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
- ignore_unknown_ids (bool): Ignore external IDs that are not found
- force (bool): Delete any jobs associated with each item.
+ external_ids: The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids: Ignore external IDs that are not found
+ force: Delete any jobs associated with each item.
Examples:
@@ -143,10 +142,10 @@ async def create(self, items: MappingWrite | Sequence[MappingWrite]) -> Mapping
"""`Create one or more mappings. `_
Args:
- items (MappingWrite | Sequence[MappingWrite]): Mapping(s) to create.
+ items: Mapping(s) to create.
Returns:
- Mapping | MappingList: Created mapping(s)
+ Created mapping(s)
Examples:
@@ -180,10 +179,10 @@ async def update(
"""`Update one or more mappings. `_
Args:
- items (MappingWrite | MappingUpdate | Sequence[MappingWrite | MappingUpdate]): Mapping(s) to update.
+ items: Mapping(s) to update.
Returns:
- Mapping | MappingList: Updated mapping(s)
+ Updated mapping(s)
Examples:
@@ -212,10 +211,10 @@ async def list(
"""`List mappings `_
Args:
- limit (int | None): Maximum number of mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ limit: Maximum number of mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- MappingList: List of requested mappings
+ List of requested mappings
Examples:
diff --git a/cognite/client/_api/hosted_extractors/sources.py b/cognite/client/_api/hosted_extractors/sources.py
index 87cfd367b0..3bc6bdf23e 100644
--- a/cognite/client/_api/hosted_extractors/sources.py
+++ b/cognite/client/_api/hosted_extractors/sources.py
@@ -43,11 +43,11 @@ async def __call__(
Fetches sources as they are iterated over, so you keep a limited number of sources in memory.
Args:
- chunk_size (int | None): Number of sources to return in each chunk. Defaults to yielding one source a time.
- limit (int | None): Maximum number of sources to return. Defaults to returning all items.
+ chunk_size: Number of sources to return in each chunk. Defaults to yielding one source a time.
+ limit: Maximum number of sources to return. Defaults to returning all items.
Yields:
- Source | SourceList: yields Source one by one if chunk_size is not specified, else SourceList objects.
+ yields Source one by one if chunk_size is not specified, else SourceList objects.
""" # noqa: DOC404
self._warning.warn()
@@ -73,11 +73,11 @@ async def retrieve(
"""`Retrieve one or more sources. `_
Args:
- external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
- ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception.
+ external_ids: The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception.
Returns:
- Source | SourceList: Requested sources
+ Requested sources
Examples:
@@ -106,9 +106,9 @@ async def delete(
"""`Delete one or more sources `_
Args:
- external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
- ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception.
- force (bool): Delete any jobs associated with each item.
+ external_ids: The external ID provided by the client. Must be unique for the resource type.
+ ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception.
+ force: Delete any jobs associated with each item.
Examples:
Delete sources by id:
@@ -142,10 +142,10 @@ async def create(self, items: SourceWrite | Sequence[SourceWrite]) -> Source | S
"""`Create one or more sources. `_
Args:
- items (SourceWrite | Sequence[SourceWrite]): Source(s) to create.
+ items: Source(s) to create.
Returns:
- Source | SourceList: Created source(s)
+ Created source(s)
Examples:
@@ -189,11 +189,11 @@ async def update(
"""`Update one or more sources. `_
Args:
- items (SourceWrite | SourceUpdate | Sequence[SourceWrite | SourceUpdate]): Source(s) to update.
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (SourceWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ items: Source(s) to update.
+ mode: How to update data when a non-update object is given (SourceWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- Source | SourceList: Updated source(s)
+ Updated source(s)
Examples:
@@ -236,10 +236,10 @@ async def list(
"""`List sources `_
Args:
- limit (int | None): Maximum number of sources to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ limit: Maximum number of sources to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- SourceList: List of requested sources
+ List of requested sources
Examples:
diff --git a/cognite/client/_api/iam/__init__.py b/cognite/client/_api/iam/__init__.py
index 067556b127..45e9c2349e 100644
--- a/cognite/client/_api/iam/__init__.py
+++ b/cognite/client/_api/iam/__init__.py
@@ -113,14 +113,12 @@ def compare_capabilities(
``client.iam.verify_capabilities`` instead.
Args:
- existing_capabilities (ComparableCapability): List of existing capabilities.
- desired_capabilities (ComparableCapability): List of wanted capabilities to check against existing.
- project (str | None): If a ProjectCapability or ProjectCapabilityList is passed, we need to know which CDF project
- to pull capabilities from (existing might be from several). If project is not passed, and ProjectCapabilityList
- is used, it will be inferred from the AsyncCogniteClient used to call retrieve it via token/inspect.
+ existing_capabilities: List of existing capabilities.
+ desired_capabilities: List of wanted capabilities to check against existing.
+ project: If a ProjectCapability or ProjectCapabilityList is passed, we need to know which CDF project to pull capabilities from (existing might be from several). If project is not passed, and ProjectCapabilityList is used, it will be inferred from the AsyncCogniteClient used to call retrieve it via token/inspect.
Returns:
- list[Capability]: A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc.
+ A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc.
Examples:
@@ -210,10 +208,10 @@ async def verify_capabilities(self, desired_capabilities: ComparableCapability)
"""Helper method to compare your current capabilities with a set of desired capabilities and return any missing.
Args:
- desired_capabilities (ComparableCapability): List of desired capabilities to check against existing.
+ desired_capabilities: List of desired capabilities to check against existing.
Returns:
- list[Capability]: A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc.
+ A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc.
Examples:
diff --git a/cognite/client/_api/iam/groups.py b/cognite/client/_api/iam/groups.py
index 06b4aa77a4..ffd5bb8d23 100644
--- a/cognite/client/_api/iam/groups.py
+++ b/cognite/client/_api/iam/groups.py
@@ -49,10 +49,10 @@ async def list(self, all: bool = False) -> GroupList:
"""`List groups. `_
Args:
- all (bool): Whether to get all groups, only available with the groups:list acl.
+ all: Whether to get all groups, only available with the groups:list acl.
Returns:
- GroupList: List of groups.
+ List of groups.
Example:
@@ -82,9 +82,9 @@ async def create(self, group: Group | GroupWrite | Sequence[Group] | Sequence[Gr
"""`Create one or more groups. `_
Args:
- group (Group | GroupWrite | Sequence[Group] | Sequence[GroupWrite]): Group or list of groups to create.
+ group: Group or list of groups to create.
Returns:
- Group | GroupList: The created group(s).
+ The created group(s).
Example:
@@ -149,7 +149,7 @@ async def delete(self, id: int | Sequence[int]) -> None:
"""`Delete one or more groups. `_
Args:
- id (int | Sequence[int]): ID or list of IDs of groups to delete.
+ id: ID or list of IDs of groups to delete.
Example:
diff --git a/cognite/client/_api/iam/security_categories.py b/cognite/client/_api/iam/security_categories.py
index c2f9beb453..79c87af400 100644
--- a/cognite/client/_api/iam/security_categories.py
+++ b/cognite/client/_api/iam/security_categories.py
@@ -17,10 +17,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SecurityCategory
"""`List security categories. `_
Args:
- limit (int | None): Max number of security categories to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ limit: Max number of security categories to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- SecurityCategoryList: List of security categories
+ List of security categories
Example:
@@ -51,10 +51,10 @@ async def create(
"""`Create one or more security categories. `_
Args:
- security_category (SecurityCategory | SecurityCategoryWrite | Sequence[SecurityCategory] | Sequence[SecurityCategoryWrite]): Security category or list of categories to create.
+ security_category: Security category or list of categories to create.
Returns:
- SecurityCategory | SecurityCategoryList: The created security category or categories.
+ The created security category or categories.
Example:
@@ -78,7 +78,7 @@ async def delete(self, id: int | Sequence[int]) -> None:
"""`Delete one or more security categories. `_
Args:
- id (int | Sequence[int]): ID or list of IDs of security categories to delete.
+ id: ID or list of IDs of security categories to delete.
Example:
diff --git a/cognite/client/_api/iam/sessions.py b/cognite/client/_api/iam/sessions.py
index c4dd245bd5..aa391c7c48 100644
--- a/cognite/client/_api/iam/sessions.py
+++ b/cognite/client/_api/iam/sessions.py
@@ -33,13 +33,8 @@ async def create(
"""`Create a session. `_
Args:
- client_credentials (ClientCredentials | None): The client credentials to create the session. This is required
- if session_type is set to 'CLIENT_CREDENTIALS'.
- session_type (SessionType | Literal['DEFAULT']): The type of session to create. Can be
- either 'CLIENT_CREDENTIALS', 'TOKEN_EXCHANGE', 'ONESHOT_TOKEN_EXCHANGE' or 'DEFAULT'.
- Defaults to 'DEFAULT' which will use -this- AsyncCogniteClient object to create the session.
- If this client was created using a token, 'TOKEN_EXCHANGE' will be used, and if
- this client was created using client credentials, 'CLIENT_CREDENTIALS' will be used.
+ client_credentials: The client credentials to create the session. This is required if session_type is set to 'CLIENT_CREDENTIALS'.
+ session_type: The type of session to create. Can be either 'CLIENT_CREDENTIALS', 'TOKEN_EXCHANGE', 'ONESHOT_TOKEN_EXCHANGE' or 'DEFAULT'. Defaults to 'DEFAULT' which will use -this- AsyncCogniteClient object to create the session. If this client was created using a token, 'TOKEN_EXCHANGE' will be used, and if this client was created using client credentials, 'CLIENT_CREDENTIALS' will be used.
Session Types:
@@ -48,7 +43,7 @@ async def create(
* **one_shot_token_exchange**: Credentials for a session using one-shot token exchange to reuse the user's credentials. One-shot sessions are short-lived sessions that are not refreshed and do not require support for token exchange from the identity provider.
Returns:
- CreatedSession: The object with token inspection details.
+ The object with token inspection details.
"""
if client_credentials is None and isinstance(creds := self._config.credentials, OAuthClientCredentials):
client_credentials = ClientCredentials(creds.client_id, creds.client_secret)
@@ -86,10 +81,10 @@ async def revoke(self, id: int | Sequence[int]) -> Session | SessionList:
"""`Revoke access to a session. Revocation of a session may in some cases take up to 1 hour to take effect. `_
Args:
- id (int | Sequence[int]): Id or list of session ids
+ id: Id or list of session ids
Returns:
- Session | SessionList: List of revoked sessions. If the user does not have the sessionsAcl:LIST capability, then only the session IDs will be present in the response.
+ List of revoked sessions. If the user does not have the sessionsAcl:LIST capability, then only the session IDs will be present in the response.
"""
ident_sequence = IdentifierSequence.load(ids=id, external_ids=None)
@@ -119,10 +114,10 @@ async def retrieve(self, id: int | Sequence[int]) -> Session | SessionList:
The request will fail if any of the IDs does not belong to an existing session.
Args:
- id (int | Sequence[int]): Id or list of session ids
+ id: Id or list of session ids
Returns:
- Session | SessionList: Session or list of sessions.
+ Session or list of sessions.
"""
identifiers = IdentifierSequence.load(ids=id, external_ids=None)
@@ -136,11 +131,11 @@ async def list(self, status: SessionStatus | None = None, limit: int = DEFAULT_L
"""`List all sessions in the current project. `_
Args:
- status (SessionStatus | None): If given, only sessions with the given status are returned.
- limit (int): Max number of sessions to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ status: If given, only sessions with the given status are returned.
+ limit: Max number of sessions to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- SessionList: a list of sessions in the current project.
+ a list of sessions in the current project.
"""
filter = {"status": status.upper()} if status is not None else None
return await self._list(list_cls=SessionList, resource_cls=Session, method="GET", filter=filter, limit=limit)
diff --git a/cognite/client/_api/iam/token.py b/cognite/client/_api/iam/token.py
index 48b477b70e..3f154f8baf 100644
--- a/cognite/client/_api/iam/token.py
+++ b/cognite/client/_api/iam/token.py
@@ -11,7 +11,7 @@ async def inspect(self) -> TokenInspection:
Get details about which projects it belongs to and which capabilities are granted to it.
Returns:
- TokenInspection: The object with token inspection details.
+ The object with token inspection details.
Example:
diff --git a/cognite/client/_api/labels.py b/cognite/client/_api/labels.py
index 6a1aeff093..79cbdc36bb 100644
--- a/cognite/client/_api/labels.py
+++ b/cognite/client/_api/labels.py
@@ -54,15 +54,15 @@ async def __call__(
"""Iterate over Labels
Args:
- chunk_size (int | None): Number of Labels to return in each chunk. Defaults to yielding one Label a time.
- name (str | None): returns the label definitions matching that name
- external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified
- limit (int | None): Maximum number of label definitions to return. Defaults return all labels.
- data_set_ids (int | Sequence[int] | None): return only labels in the data sets with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): return only labels in the data sets with this external id / these external ids.
+ chunk_size: Number of Labels to return in each chunk. Defaults to yielding one Label a time.
+ name: returns the label definitions matching that name
+ external_id_prefix: filter label definitions with external ids starting with the prefix specified
+ limit: Maximum number of label definitions to return. Defaults return all labels.
+ data_set_ids: return only labels in the data sets with this id / these ids.
+ data_set_external_ids: return only labels in the data sets with this external id / these external ids.
Yields:
- LabelDefinition | LabelDefinitionList: yields Labels one by one or in chunks.
+ yields Labels one by one or in chunks.
""" # noqa: DOC404
data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids)
@@ -97,11 +97,11 @@ async def retrieve(
"""`Retrieve one or more label definitions by external id. `_
Args:
- external_id (str | SequenceNotStr[str]): External ID or list of external ids
- ignore_unknown_ids (bool): If True, ignore IDs and external IDs that are not found rather than throw an exception.
+ external_id: External ID or list of external ids
+ ignore_unknown_ids: If True, ignore IDs and external IDs that are not found rather than throw an exception.
Returns:
- LabelDefinition | LabelDefinitionList | None: The requested label definition(s)
+ The requested label definition(s)
Examples:
@@ -137,14 +137,14 @@ async def list(
"""`List Labels `_
Args:
- name (str | None): returns the label definitions matching that name
- external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified
- data_set_ids (int | Sequence[int] | None): return only labels in the data sets with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): return only labels in the data sets with this external id / these external ids.
- limit (int | None): Maximum number of label definitions to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ name: returns the label definitions matching that name
+ external_id_prefix: filter label definitions with external ids starting with the prefix specified
+ data_set_ids: return only labels in the data sets with this id / these ids.
+ data_set_external_ids: return only labels in the data sets with this external id / these external ids.
+ limit: Maximum number of label definitions to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- LabelDefinitionList: List of requested Labels
+ List of requested Labels
Examples:
@@ -186,10 +186,10 @@ async def create(
"""`Create one or more label definitions. `_
Args:
- label (LabelDefinition | LabelDefinitionWrite | Sequence[LabelDefinition | LabelDefinitionWrite]): The label definition(s) to create.
+ label: The label definition(s) to create.
Returns:
- LabelDefinition | LabelDefinitionList: Created label definition(s)
+ Created label definition(s)
Raises:
TypeError: Function input 'label' is of the wrong type
@@ -217,7 +217,7 @@ async def delete(self, external_id: str | SequenceNotStr[str] | None = None) ->
"""`Delete one or more label definitions `_
Args:
- external_id (str | SequenceNotStr[str] | None): One or more label external ids
+ external_id: One or more label external ids
Examples:
diff --git a/cognite/client/_api/limits.py b/cognite/client/_api/limits.py
index ff9fadc045..0c14531e44 100644
--- a/cognite/client/_api/limits.py
+++ b/cognite/client/_api/limits.py
@@ -31,13 +31,10 @@ async def retrieve(self, id: str) -> Limit | None:
Retrieves a limit value by its `limitId`.
Args:
- id (str): Limit ID to retrieve.
- Limits are identified by an id containing the service name and a service-scoped limit name.
- For instance `atlas.monthly_ai_tokens` is the id of the `atlas` service limit `monthly_ai_tokens`.
- Service and limit names are always in `lower_snake_case`.
+ id: Limit ID to retrieve. Limits are identified by an id containing the service name and a service-scoped limit name. For instance `atlas.monthly_ai_tokens` is the id of the `atlas` service limit `monthly_ai_tokens`. Service and limit names are always in `lower_snake_case`.
Returns:
- Limit | None: The requested limit, or `None` if not found.
+ The requested limit, or `None` if not found.
Examples:
@@ -64,11 +61,11 @@ async def list(self, filter: Prefix | None = None, limit: int | None = DEFAULT_L
Retrieves all limit values for a specific project. Optionally filter by limit ID prefix using a `Prefix` filter.
Args:
- filter (Prefix | None): Optional `Prefix` filter to apply on the `limitId` property (only `Prefix` filters are supported).
- limit (int | None): Maximum number of limits to return. Defaults to 25. Set to None or -1 to return all limits
+ filter: Optional `Prefix` filter to apply on the `limitId` property (only `Prefix` filters are supported).
+ limit: Maximum number of limits to return. Defaults to 25. Set to None or -1 to return all limits
Returns:
- LimitList: List of all limit values in the project.
+ List of all limit values in the project.
Examples:
diff --git a/cognite/client/_api/org_apis/principals.py b/cognite/client/_api/org_apis/principals.py
index 057bc0ce59..3696e7eb96 100644
--- a/cognite/client/_api/org_apis/principals.py
+++ b/cognite/client/_api/org_apis/principals.py
@@ -18,8 +18,7 @@ async def me(self) -> Principal:
"""`Get the current caller's information. `_
Returns:
- Principal: The principal of the user running the code, i.e. the
- principal *this* AsyncCogniteClient was instantiated with.
+ The principal of the user running the code, i.e. the principal *this* AsyncCogniteClient was instantiated with.
Examples:
Get your own principal:
@@ -85,12 +84,12 @@ async def retrieve(
"""`Retrieve principal by reference in the organization `_
Args:
- id (str | SequenceNotStr[str] | None): The ID(s) of the principal(s) to retrieve.
- external_id (str | SequenceNotStr[str] | None): The external ID(s) of the principal to retrieve.
- ignore_unknown_ids (bool): This is only relevant when retrieving multiple principals. If set to True, the method will return the principals that were found and ignore the ones that were not found. If set to False, the method will raise a CogniteAPIError if any of the specified principals were not found. Defaults to False.
+ id: The ID(s) of the principal(s) to retrieve.
+ external_id: The external ID(s) of the principal to retrieve.
+ ignore_unknown_ids: This is only relevant when retrieving multiple principals. If set to True, the method will return the principals that were found and ignore the ones that were not found. If set to False, the method will raise a CogniteAPIError if any of the specified principals were not found. Defaults to False.
Returns:
- Principal | PrincipalList | None: The principal(s) with the specified ID(s) or external ID(s).
+ The principal(s) with the specified ID(s) or external ID(s).
Examples:
Retrieve a principal by ID:
@@ -114,11 +113,11 @@ async def list(self, types: str | Sequence[str] | None = None, limit: int = DEFA
"""`List principals in the organization `_
Args:
- types (str | Sequence[str] | None): Filter by principal type(s). Defaults to None, which means no filtering.
- limit (int): The maximum number of principals to return. Defaults to 25.
+ types: Filter by principal type(s). Defaults to None, which means no filtering.
+ limit: The maximum number of principals to return. Defaults to 25.
Returns:
- PrincipalList: The principal of the user running the code, i.e. the principal *this* CogniteClient was instantiated with.
+ The list of principals in the organization, optionally filtered by type.
Examples:
List principals in the organization:
diff --git a/cognite/client/_api/postgres_gateway/tables.py b/cognite/client/_api/postgres_gateway/tables.py
index f2de65c6c0..2032633df3 100644
--- a/cognite/client/_api/postgres_gateway/tables.py
+++ b/cognite/client/_api/postgres_gateway/tables.py
@@ -38,11 +38,11 @@ async def __call__(
Fetches custom tables as they are iterated over, so you keep a limited number of custom tables in memory.
Args:
- chunk_size (int | None): Number of custom tables to return in each chunk. Defaults to yielding one custom table at a time.
- limit (int | None): Maximum number of custom tables to return. Defaults to return all.
+ chunk_size: Number of custom tables to return in each chunk. Defaults to yielding one custom table at a time.
+ limit: Maximum number of custom tables to return. Defaults to return all.
Yields:
- pg.Table | pg.TableList: yields Table one by one if chunk_size is not specified, else TableList objects.
+ yields Table one by one if chunk_size is not specified, else TableList objects.
"""
async for item in self._list_generator( # type: ignore [call-overload]
list_cls=pg.TableList,
@@ -63,11 +63,11 @@ async def create(self, username: str, items: pg.TableWrite | Sequence[pg.TableWr
"""`Create tables `_
Args:
- username (str): The name of the username (a.k.a. database) to be managed from the API
- items (pg.TableWrite | Sequence[pg.TableWrite]): The table(s) to create
+ username: The name of the username (a.k.a. database) to be managed from the API
+ items: The table(s) to create
Returns:
- pg.Table | pg.TableList: Created tables
+ Created tables
Examples:
@@ -109,12 +109,12 @@ async def retrieve(
Retrieve a list of Postgres tables for a user by their table names, optionally ignoring unknown table names
Args:
- username (str): The username (a.k.a. database) to be managed from the API
- tablename (str | SequenceNotStr[str]): The name of the table(s) to be retrieved
- ignore_unknown_ids (bool): Ignore table names not found
+ username: The username (a.k.a. database) to be managed from the API
+ tablename: The name of the table(s) to be retrieved
+ ignore_unknown_ids: Ignore table names not found
Returns:
- pg.Table | pg.TableList | None: Foreign tables
+ Foreign tables
Examples:
@@ -144,9 +144,9 @@ async def delete(
"""`Delete postgres table(s) `_
Args:
- username (str): The name of the username (a.k.a. database) to be managed from the API
- tablename (str | SequenceNotStr[str]): The name of the table(s) to be deleted
- ignore_unknown_ids (bool): Ignore table names that are not found
+ username: The name of the username (a.k.a. database) to be managed from the API
+ tablename: The name of the table(s) to be deleted
+ ignore_unknown_ids: Ignore table names that are not found
Examples:
@@ -178,12 +178,12 @@ async def list(
List all tables in a given project.
Args:
- username (str): The name of the username (a.k.a. database) to be managed from the API
- include_built_ins (Literal['yes', 'no'] | None): Determines if API should return built-in tables or not
- limit (int | None): Limits the number of results to be returned.
+ username: The name of the username (a.k.a. database) to be managed from the API
+ include_built_ins: Determines if API should return built-in tables or not
+ limit: Limits the number of results to be returned.
Returns:
- pg.TableList: Foreign tables
+ Foreign tables
Examples:
diff --git a/cognite/client/_api/postgres_gateway/users.py b/cognite/client/_api/postgres_gateway/users.py
index ac86bb2616..4d1ad9e885 100644
--- a/cognite/client/_api/postgres_gateway/users.py
+++ b/cognite/client/_api/postgres_gateway/users.py
@@ -45,11 +45,11 @@ async def __call__(
Fetches user as they are iterated over, so you keep a limited number of users in memory.
Args:
- chunk_size (int | None): Number of users to return in each chunk. Defaults to yielding one user at a time.
- limit (int | None): Maximum number of users to return. Defaults to return all.
+ chunk_size: Number of users to return in each chunk. Defaults to yielding one user at a time.
+ limit: Maximum number of users to return. Defaults to return all.
Yields:
- User | UserList: yields User one by one if chunk_size is not specified, else UserList objects.
+ yields User one by one if chunk_size is not specified, else UserList objects.
""" # noqa: DOC404
async for item in self._list_generator(
list_cls=UserList,
@@ -72,10 +72,10 @@ async def create(self, user: UserWrite | Sequence[UserWrite]) -> UserCreated | U
Create postgres users.
Args:
- user (UserWrite | Sequence[UserWrite]): The user(s) to create.
+ user: The user(s) to create.
Returns:
- UserCreated | UserCreatedList: The created user(s)
+ The created user(s)
Examples:
@@ -114,10 +114,10 @@ async def update(self, items: UserUpdate | UserWrite | Sequence[UserUpdate | Use
Update postgres users
Args:
- items (UserUpdate | UserWrite | Sequence[UserUpdate | UserWrite]): The user(s) to update.
+ items: The user(s) to update.
Returns:
- User | UserList: The updated user(s)
+ The updated user(s)
Examples:
@@ -150,9 +150,8 @@ async def delete(self, username: str | SequenceNotStr[str], ignore_unknown_ids:
Delete postgres users
Args:
- username (str | SequenceNotStr[str]): Usernames of the users to delete.
- ignore_unknown_ids (bool): Ignore usernames that are not found
-
+ username: Usernames of the users to delete.
+ ignore_unknown_ids: Ignore usernames that are not found
Examples:
@@ -186,11 +185,11 @@ async def retrieve(self, username: str | SequenceNotStr[str], ignore_unknown_ids
Retrieve a list of postgres users by their usernames, optionally ignoring unknown usernames
Args:
- username (str | SequenceNotStr[str]): Usernames of the users to retrieve.
- ignore_unknown_ids (bool): Ignore usernames that are not found
+ username: Usernames of the users to retrieve.
+ ignore_unknown_ids: Ignore usernames that are not found
Returns:
- User | UserList: The retrieved user(s).
+ The retrieved user(s).
Examples:
@@ -215,10 +214,10 @@ async def list(self, limit: int = DEFAULT_LIMIT_READ) -> UserList:
List all users in a given project.
Args:
- limit (int): Limits the number of results to be returned.
+ limit: Limits the number of results to be returned.
Returns:
- UserList: A list of users
+ A list of users
Examples:
diff --git a/cognite/client/_api/raw/databases.py b/cognite/client/_api/raw/databases.py
index f1f9347e9b..7439ff1460 100644
--- a/cognite/client/_api/raw/databases.py
+++ b/cognite/client/_api/raw/databases.py
@@ -37,11 +37,11 @@ async def __call__(
Fetches dbs as they are iterated over, so you keep a limited number of dbs in memory.
Args:
- chunk_size (int | None): Number of dbs to return in each chunk. Defaults to yielding one db a time.
- limit (int | None): Maximum number of dbs to return. Defaults to return all items.
+ chunk_size: Number of dbs to return in each chunk. Defaults to yielding one db a time.
+ limit: Maximum number of dbs to return. Defaults to return all items.
Yields:
- Database | DatabaseList: No description.
+ yields Database one by one if chunk_size is not specified, else DatabaseList objects.
""" # noqa: DOC404
async for item in self._list_generator(
list_cls=DatabaseList, resource_cls=Database, chunk_size=chunk_size, method="GET", limit=limit
@@ -58,10 +58,10 @@ async def create(self, name: str | list[str]) -> Database | DatabaseList:
"""`Create one or more databases. `_
Args:
- name (str | list[str]): A db name or list of db names to create.
+ name: A db name or list of db names to create.
Returns:
- Database | DatabaseList: Database or list of databases that has been created.
+ Database or list of databases that has been created.
Examples:
@@ -83,8 +83,8 @@ async def delete(self, name: str | SequenceNotStr[str], recursive: bool = False)
"""`Delete one or more databases. `_
Args:
- name (str | SequenceNotStr[str]): A db name or list of db names to delete.
- recursive (bool): Recursively delete all tables in the database(s).
+ name: A db name or list of db names to delete.
+ recursive: Recursively delete all tables in the database(s).
Examples:
@@ -117,10 +117,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DatabaseList:
"""`List databases `_
Args:
- limit (int | None): Maximum number of databases to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ limit: Maximum number of databases to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- DatabaseList: List of requested databases.
+ List of requested databases.
Examples:
diff --git a/cognite/client/_api/raw/rows.py b/cognite/client/_api/raw/rows.py
index 7a3c5834e6..26dbfd7e16 100644
--- a/cognite/client/_api/raw/rows.py
+++ b/cognite/client/_api/raw/rows.py
@@ -119,20 +119,17 @@ async def __call__(
by halting retrieval speed when the callers code can't keep up.
Args:
- db_name (str): Name of the database.
- table_name (str): Name of the table.
- chunk_size (int | None): Number of rows to return in each chunk (may be lower). Defaults to yielding one row at a time.
- Note: When used together with 'partitions' the default is 10000 (matching the API limit) and there's an implicit minimum of 1000 rows.
- partitions (int | None): Retrieve rows in parallel using this number of workers. Defaults to not use concurrency.
- The setting is capped at ``global_config.concurrency_settings.raw.read`` and _can_ be used with a finite limit. To prevent unexpected problems
- and maximize read throughput, check out `concurrency limits in the API documentation. `_
- limit (int | None): Maximum number of rows to return. Can be used with partitions. Defaults to returning all items.
- min_last_updated_time (int | None): Rows must have been last updated after this time (exclusive). Milliseconds since epoch.
- max_last_updated_time (int | None): Rows must have been last updated before this time (inclusive). Milliseconds since epoch.
- columns (list[str] | None): List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys.
+ db_name: Name of the database.
+ table_name: Name of the table.
+ chunk_size: Number of rows to return in each chunk (may be lower). Defaults to yielding one row at a time. Note: When used together with 'partitions' the default is 10000 (matching the API limit) and there's an implicit minimum of 1000 rows.
+ partitions: Retrieve rows in parallel using this number of workers. Defaults to not use concurrency. The setting is capped at ``global_config.concurrency_settings.raw.read`` and _can_ be used with a finite limit. To prevent unexpected problems and maximize read throughput, check out `concurrency limits in the API documentation. `_
+ limit: Maximum number of rows to return. Can be used with partitions. Defaults to returning all items.
+ min_last_updated_time: Rows must have been last updated after this time (exclusive). Milliseconds since epoch.
+ max_last_updated_time: Rows must have been last updated before this time (inclusive). Milliseconds since epoch.
+ columns: List of column keys. Set to `None` to retrieve all, use empty list, [], to retrieve only row keys.
Yields:
- Row | RowList: An iterator yielding the requested row or rows.
+ An iterator yielding the requested row or rows.
""" # noqa: DOC404
if partitions is None or _RUNNING_IN_BROWSER:
iterator = self._list_generator(
@@ -258,10 +255,10 @@ async def insert(
"""`Insert one or more rows into a table. `_
Args:
- db_name (str): Name of the database.
- table_name (str): Name of the table.
- row (Sequence[Row] | Sequence[RowWrite] | Row | RowWrite | dict): The row(s) to insert
- ensure_parent (bool): Create database/table if they don't already exist.
+ db_name: Name of the database.
+ table_name: Name of the table.
+ row: The row(s) to insert
+ ensure_parent: Create database/table if they don't already exist.
Examples:
@@ -312,11 +309,11 @@ async def insert_dataframe(
Uses index for row keys.
Args:
- db_name (str): Name of the database.
- table_name (str): Name of the table.
- dataframe (pd.DataFrame): The dataframe to insert. Index will be used as row keys.
- ensure_parent (bool): Create database/table if they don't already exist.
- dropna (bool): Remove NaNs (but keep None's in dtype=object columns) before inserting. Done individually per column. Default: True
+ db_name: Name of the database.
+ table_name: Name of the table.
+ dataframe: The dataframe to insert. Index will be used as row keys.
+ ensure_parent: Create database/table if they don't already exist.
+ dropna: Remove NaNs (but keep None's in dtype=object columns) before inserting. Done individually per column. Default: True
Examples:
@@ -383,9 +380,9 @@ async def delete(self, db_name: str, table_name: str, key: str | SequenceNotStr[
"""`Delete rows from a table. `_
Args:
- db_name (str): Name of the database.
- table_name (str): Name of the table.
- key (str | SequenceNotStr[str]): The key(s) of the row(s) to delete.
+ db_name: Name of the database.
+ table_name: Name of the table.
+ key: The key(s) of the row(s) to delete.
Examples:
@@ -422,12 +419,12 @@ async def retrieve(self, db_name: str, table_name: str, key: str) -> Row | None:
"""`Retrieve a single row by key. `_
Args:
- db_name (str): Name of the database.
- table_name (str): Name of the table.
- key (str): The key of the row to retrieve.
+ db_name: Name of the database.
+ table_name: Name of the table.
+ key: The key of the row to retrieve.
Returns:
- Row | None: The requested row.
+ The requested row.
Examples:
@@ -477,21 +474,18 @@ async def retrieve_dataframe(
Rowkeys are used as the index.
Args:
- db_name (str): Name of the database.
- table_name (str): Name of the table.
- min_last_updated_time (int | None): Rows must have been last updated after this time. Milliseconds since epoch.
- max_last_updated_time (int | None): Rows must have been last updated before this time. Milliseconds since epoch.
- columns (list[str] | None): List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys.
- limit (int | None): The number of rows to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items.
- partitions (int | None): Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit.
- When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read``
- for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out
- `concurrency limits in the API documentation. `_
- last_updated_time_in_index (bool): Use a MultiIndex with row keys and last_updated_time as index.
- infer_dtypes (bool): If True, pandas will try to infer dtypes of the columns. Defaults to True.
+ db_name: Name of the database.
+ table_name: Name of the table.
+ min_last_updated_time: Rows must have been last updated after this time. Milliseconds since epoch.
+ max_last_updated_time: Rows must have been last updated before this time. Milliseconds since epoch.
+ columns: List of column keys. Set to `None` to retrieve all, use empty list, [], to retrieve only row keys.
+ limit: The number of rows to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ partitions: Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read`` for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out `concurrency limits in the API documentation. `_
+ last_updated_time_in_index: Use a MultiIndex with row keys and last_updated_time as index.
+ infer_dtypes: If True, pandas will try to infer dtypes of the columns. Defaults to True.
Returns:
- pd.DataFrame: The requested rows in a pandas dataframe.
+ The requested rows in a pandas dataframe.
Examples:
@@ -549,19 +543,16 @@ async def list(
"""`List rows in a table. `_
Args:
- db_name (str): Name of the database.
- table_name (str): Name of the table.
- min_last_updated_time (int | None): Rows must have been last updated after this time (exclusive). Milliseconds since epoch.
- max_last_updated_time (int | None): Rows must have been last updated before this time (inclusive). Milliseconds since epoch.
- columns (list[str] | None): List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys.
- limit (int | None): The number of rows to retrieve. Can be used with partitions. Defaults to 25. Set to -1, float("inf") or None to return all items.
- partitions (int | None): Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit.
- When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read``
- for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out
- `concurrency limits in the API documentation. `_
+ db_name: Name of the database.
+ table_name: Name of the table.
+ min_last_updated_time: Rows must have been last updated after this time (exclusive). Milliseconds since epoch.
+ max_last_updated_time: Rows must have been last updated before this time (inclusive). Milliseconds since epoch.
+ columns: List of column keys. Set to `None` to retrieve all, use empty list, [], to retrieve only row keys.
+ limit: The number of rows to retrieve. Can be used with partitions. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ partitions: Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read`` for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out `concurrency limits in the API documentation. `_
Returns:
- RowList: The requested rows.
+ The requested rows.
Examples:
diff --git a/cognite/client/_api/raw/tables.py b/cognite/client/_api/raw/tables.py
index d58fa059f9..fbef778055 100644
--- a/cognite/client/_api/raw/tables.py
+++ b/cognite/client/_api/raw/tables.py
@@ -38,12 +38,12 @@ async def __call__(
Fetches tables as they are iterated over, so you keep a limited number of tables in memory.
Args:
- db_name (str): Name of the database to iterate over tables for
- chunk_size (int | None): Number of tables to return in each chunk. Defaults to yielding one table a time.
- limit (int | None): Maximum number of tables to return. Defaults to return all items.
+ db_name: Name of the database to iterate over tables for
+ chunk_size: Number of tables to return in each chunk. Defaults to yielding one table a time.
+ limit: Maximum number of tables to return. Defaults to return all items.
Yields:
- raw.Table | raw.TableList: The tables in the database.
+ The tables in the database.
"""
table_iterator = self._list_generator(
list_cls=raw.TableList,
@@ -66,11 +66,11 @@ async def create(self, db_name: str, name: str | list[str]) -> raw.Table | raw.T
"""`Create one or more tables. `_
Args:
- db_name (str): Database to create the tables in.
- name (str | list[str]): A table name or list of table names to create.
+ db_name: Database to create the tables in.
+ name: A table name or list of table names to create.
Returns:
- raw.Table | raw.TableList: raw.Table or list of tables that has been created.
+ raw.Table or list of tables that has been created.
Examples:
@@ -99,8 +99,8 @@ async def delete(self, db_name: str, name: str | SequenceNotStr[str]) -> None:
"""`Delete one or more tables. `_
Args:
- db_name (str): Database to delete tables from.
- name (str | SequenceNotStr[str]): A table name or list of table names to delete.
+ db_name: Database to delete tables from.
+ name: A table name or list of table names to delete.
Examples:
@@ -154,11 +154,11 @@ async def list(self, db_name: str, limit: int | None = DEFAULT_LIMIT_READ) -> ra
"""`List tables `_
Args:
- db_name (str): The database to list tables from.
- limit (int | None): Maximum number of tables to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ db_name: The database to list tables from.
+ limit: Maximum number of tables to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- raw.TableList: List of requested tables.
+ List of requested tables.
Examples:
diff --git a/cognite/client/_api/relationships.py b/cognite/client/_api/relationships.py
index 0d013e4eb4..ff1218a1e5 100644
--- a/cognite/client/_api/relationships.py
+++ b/cognite/client/_api/relationships.py
@@ -100,25 +100,25 @@ async def __call__(
Fetches relationships as they are iterated over, so you keep a limited number of relationships in memory.
Args:
- chunk_size (int | None): Number of Relationships to return in each chunk. Defaults to yielding one relationship at a time.
- source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their source External Id field
- source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their source Type field
- target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their target External Id field
- target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their target Type field
- data_set_ids (int | Sequence[int] | None): Return only relationships in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only relationships in the specified data set(s) with this external id / these external ids.
- start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive)
- end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive)
- confidence (dict[str, int] | None): Range to filter the field for (inclusive).
- last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive).
- created_time (dict[str, int] | None): Range to filter the field for (inclusive).
- active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time.
- labels (LabelFilter | None): Return only the resource matching the specified label constraints.
- limit (int | None): No description.
- fetch_resources (bool): No description.
+ chunk_size: Number of Relationships to return in each chunk. Defaults to yielding one relationship at a time.
+ source_external_ids: Include relationships that have any of these values in their source External Id field
+ source_types: Include relationships that have any of these values in their source Type field
+ target_external_ids: Include relationships that have any of these values in their target External Id field
+ target_types: Include relationships that have any of these values in their target Type field
+ data_set_ids: Return only relationships in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only relationships in the specified data set(s) with this external id / these external ids.
+ start_time: Range between two timestamps, minimum and maximum milliseconds (inclusive)
+ end_time: Range between two timestamps, minimum and maximum milliseconds (inclusive)
+ confidence: Range to filter the field for (inclusive).
+ last_updated_time: Range to filter the field for (inclusive).
+ created_time: Range to filter the field for (inclusive).
+ active_at_time: Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime it will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time.
+ labels: Return only the resource matching the specified label constraints.
+ limit: Maximum number of relationships to return.
+ fetch_resources: If true, will try to return the full resources referenced by the relationship in the source and target fields.
Yields:
- Relationship | RelationshipList: yields Relationship one by one if chunk_size is not specified, else RelationshipList objects.
+ yields Relationship one by one if chunk_size is not specified, else RelationshipList objects.
""" # noqa: DOC404
data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids)
filter = RelationshipFilter(
@@ -156,11 +156,11 @@ async def retrieve(self, external_id: str, fetch_resources: bool = False) -> Rel
"""Retrieve a single relationship by external id.
Args:
- external_id (str): External ID
- fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the source and target fields.
+ external_id: External ID
+ fetch_resources: If true, will try to return the full resources referenced by the relationship in the source and target fields.
Returns:
- Relationship | None: Requested relationship or None if it does not exist.
+ Requested relationship or None if it does not exist.
Examples:
@@ -185,13 +185,12 @@ async def retrieve_multiple(
"""`Retrieve multiple relationships by external id. `_
Args:
- external_ids (SequenceNotStr[str]): External IDs
- fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the
- source and target fields.
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ external_ids: External IDs
+ fetch_resources: If true, will try to return the full resources referenced by the relationship in the source and target fields.
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Returns:
- RelationshipList: The requested relationships.
+ The requested relationships.
Examples:
@@ -233,25 +232,25 @@ async def list(
"""`Lists relationships stored in the project based on a query filter given in the payload of this request. Up to 1000 relationships can be retrieved in one operation. `_
Args:
- source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their source External Id field
- source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their source Type field
- target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their target External Id field
- target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their target Type field
- data_set_ids (int | Sequence[int] | None): Return only relationships in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only relationships in the specified data set(s) with this external id / these external ids.
- start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive)
- end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive)
- confidence (dict[str, int] | None): Range to filter the field for (inclusive).
- last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive).
- created_time (dict[str, int] | None): Range to filter the field for (inclusive).
- active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time.
- labels (LabelFilter | None): Return only the resource matching the specified label constraints.
- limit (int | None): Maximum number of relationships to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
- partitions (int | None): Retrieve relationships in parallel using this number of workers. Also requires `limit=None` to be passed.
- fetch_resources (bool): if true, will try to return the full resources referenced by the relationship in the source and target fields.
+ source_external_ids: Include relationships that have any of these values in their source External Id field
+ source_types: Include relationships that have any of these values in their source Type field
+ target_external_ids: Include relationships that have any of these values in their target External Id field
+ target_types: Include relationships that have any of these values in their target Type field
+ data_set_ids: Return only relationships in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only relationships in the specified data set(s) with this external id / these external ids.
+ start_time: Range between two timestamps, minimum and maximum milliseconds (inclusive)
+ end_time: Range between two timestamps, minimum and maximum milliseconds (inclusive)
+ confidence: Range to filter the field for (inclusive).
+ last_updated_time: Range to filter the field for (inclusive).
+ created_time: Range to filter the field for (inclusive).
+ active_at_time: Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime it will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time.
+ labels: Return only the resource matching the specified label constraints.
+ limit: Maximum number of relationships to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ partitions: Retrieve relationships in parallel using this number of workers. Also requires `limit=None` to be passed.
+ fetch_resources: If true, will try to return the full resources referenced by the relationship in the source and target fields.
Returns:
- RelationshipList: List of requested relationships
+ List of requested relationships
Examples:
@@ -349,10 +348,10 @@ async def create(
"""`Create one or more relationships. `_
Args:
- relationship (Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]): Relationship or list of relationships to create.
+ relationship: Relationship or list of relationships to create.
Returns:
- Relationship | RelationshipList: Created relationship(s)
+ Created relationship(s)
Note:
- The source_type and target_type field in the Relationship(s) can be any string among "Asset", "TimeSeries", "File", "Event", "Sequence".
@@ -419,11 +418,11 @@ async def update(
Currently, a full replacement of labels on a relationship is not supported (only partial add/remove updates). See the example below on how to perform partial labels update.
Args:
- item (Relationship | RelationshipWrite | RelationshipUpdate | Sequence[Relationship | RelationshipWrite | RelationshipUpdate]): Relationship(s) to update
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Relationship or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: Relationship(s) to update
+ mode: How to update data when a non-update object is given (Relationship or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- Relationship | RelationshipList: Updated relationship(s)
+ Updated relationship(s)
Examples:
Update a data set that you have fetched. This will perform a full update of the data set:
@@ -482,11 +481,11 @@ async def upsert(
For more details, see :ref:`appendix-upsert`.
Args:
- item (Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]): Relationship or list of relationships to upsert.
- mode (Literal['patch', 'replace']): Whether to patch or replace in the case the relationships are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
+ item: Relationship or list of relationships to upsert.
+ mode: Whether to patch or replace in the case the relationships are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
Returns:
- Relationship | RelationshipList: The upserted relationship(s).
+ The upserted relationship(s).
Examples:
@@ -520,8 +519,8 @@ async def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_id
"""`Delete one or more relationships. `_
Args:
- external_id (str | SequenceNotStr[str]): External ID or list of external ids
- ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception.
+ external_id: External ID or list of external ids
+ ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception.
Examples:
Delete relationships by external id:
diff --git a/cognite/client/_api/sequence_data.py b/cognite/client/_api/sequence_data.py
index c04adddb06..f3cbd4bc58 100644
--- a/cognite/client/_api/sequence_data.py
+++ b/cognite/client/_api/sequence_data.py
@@ -45,10 +45,10 @@ async def insert(
"""`Insert rows into a sequence `_
Args:
- rows (SequenceRows | dict[int, typing.Sequence[int | float | str]] | typing.Sequence[tuple[int, typing.Sequence[int | float | str]]] | typing.Sequence[dict[str, Any]]): The rows you wish to insert. Can either be a list of tuples, a list of {"rowNumber":... ,"values": ...} objects, a dictionary of rowNumber: data, or a SequenceData object. See examples below.
- columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence.
- id (int | None): Id of sequence to insert rows into.
- external_id (str | None): External id of sequence to insert rows into.
+ rows: The rows you wish to insert. Can either be a list of tuples, a list of {"rowNumber":... ,"values": ...} objects, a dictionary of rowNumber: data, or a SequenceData object. See examples below.
+ columns: List of external id for the columns of the sequence.
+ id: Id of sequence to insert rows into.
+ external_id: External id of sequence to insert rows into.
Examples:
Your rows of data can be a list of tuples where the first element is the rownumber and the second element is the data to be inserted:
@@ -127,10 +127,10 @@ async def insert_dataframe(
The sequence and columns must already exist.
Args:
- dataframe (pd.DataFrame): Pandas DataFrame object containing the sequence data.
- id (int | None): Id of sequence to insert rows into.
- external_id (str | None): External id of sequence to insert rows into.
- dropna (bool): Whether to drop rows where all values are missing. Default: True.
+ dataframe: Pandas DataFrame object containing the sequence data.
+ id: Id of sequence to insert rows into.
+ external_id: External id of sequence to insert rows into.
+ dropna: Whether to drop rows where all values are missing. Default: True.
Examples:
Insert three rows into columns 'col_a' and 'col_b' of the sequence with id=123:
@@ -157,9 +157,9 @@ async def delete(self, rows: typing.Sequence[int], id: int | None = None, extern
"""`Delete rows from a sequence `_
Args:
- rows (typing.Sequence[int]): List of row numbers.
- id (int | None): Id of sequence to delete rows from.
- external_id (str | None): External id of sequence to delete rows from.
+ rows: List of row numbers.
+ id: Id of sequence to delete rows from.
+ external_id: External id of sequence to delete rows from.
Examples:
@@ -185,10 +185,10 @@ async def delete_range(
"""`Delete a range of rows from a sequence. Note this operation is potentially slow, as retrieves each row before deleting. `_
Args:
- start (int): Row number to start from (inclusive).
- end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to delete all rows until end of sequence.
- id (int | None): Id of sequence to delete rows from.
- external_id (str | None): External id of sequence to delete rows from.
+ start: Row number to start from (inclusive).
+ end: Upper limit on the row number (exclusive). Set to None or -1 to delete all rows until end of sequence.
+ id: Id of sequence to delete rows from.
+ external_id: External id of sequence to delete rows from.
Examples:
@@ -286,15 +286,15 @@ async def retrieve(
"""`Retrieve data from a sequence `_
Args:
- external_id (str | SequenceNotStr[str] | None): The external id of the sequence to retrieve from.
- id (int | typing.Sequence[int] | None): The internal if the sequence to retrieve from.
- start (int): Row number to start from (inclusive).
- end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to get all rows until end of sequence.
- columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
- limit (int | None): Maximum number of rows to return per sequence. Pass None to fetch all (possibly limited by 'end').
+ external_id: The external id of the sequence to retrieve from.
+ id: The internal id of the sequence to retrieve from.
+ start: Row number to start from (inclusive).
+ end: Upper limit on the row number (exclusive). Set to None or -1 to get all rows until end of sequence.
+ columns: List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
+ limit: Maximum number of rows to return per sequence. Pass None to fetch all (possibly limited by 'end').
Returns:
- SequenceRows | SequenceRowsList: SequenceRows if a single identifier was given, else SequenceRowsList
+ SequenceRows if a single identifier was given, else SequenceRowsList
Examples:
@@ -342,13 +342,13 @@ async def retrieve_last_row(
"""`Retrieves the last row (i.e the row with the highest row number) in a sequence. `_
Args:
- id (int | None): Id or list of ids.
- external_id (str | None): External id or list of external ids.
- columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
- before (int | None): (optional, int): Get latest datapoint before this row number.
+ id: Id or list of ids.
+ external_id: External id or list of external ids.
+ columns: List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
+ before: Get latest datapoint before this row number.
Returns:
- SequenceRows: A Datapoints object containing the requested data, or a list of such objects.
+ A Datapoints object containing the requested data, or a list of such objects.
Examples:
@@ -380,16 +380,16 @@ async def retrieve_dataframe(
"""`Retrieve data from a sequence as a pandas dataframe `_
Args:
- start (int): (inclusive) row number to start from.
- end (int | None): (exclusive) upper limit on the row number. Set to None or -1 to get all rows until end of sequence.
- columns (list[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
- external_id (str | None): External id of sequence.
- column_names (str | None): Which field(s) to use as column header. Can use "externalId", "id", "columnExternalId", "id|columnExternalId" or "externalId|columnExternalId". Default is "externalId|columnExternalId" for queries on more than one sequence, and "columnExternalId" for queries on a single sequence.
- id (int | None): Id of sequence
- limit (int | None): Maximum number of rows to return per sequence.
+ start: (inclusive) row number to start from.
+ end: (exclusive) upper limit on the row number. Set to None or -1 to get all rows until end of sequence.
+ columns: List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved.
+ external_id: External id of sequence.
+ column_names: Which field(s) to use as column header. Can use "externalId", "id", "columnExternalId", "id|columnExternalId" or "externalId|columnExternalId". Default is "externalId|columnExternalId" for queries on more than one sequence, and "columnExternalId" for queries on a single sequence.
+ id: Id of sequence
+ limit: Maximum number of rows to return per sequence.
Returns:
- pd.DataFrame: The requested sequence data in a pandas DataFrame
+ The requested sequence data in a pandas DataFrame
Examples:
>>> from cognite.client import CogniteClient, AsyncCogniteClient
diff --git a/cognite/client/_api/sequences.py b/cognite/client/_api/sequences.py
index 026b98fce2..9aba2fff3e 100644
--- a/cognite/client/_api/sequences.py
+++ b/cognite/client/_api/sequences.py
@@ -128,23 +128,23 @@ async def __call__(
Fetches sequences as they are iterated over, so you keep a limited number of objects in memory.
Args:
- chunk_size (int | None): Number of sequences to return in each chunk. Defaults to yielding one event a time.
- name (str | None): Filter out sequences that do not have this *exact* name.
- external_id_prefix (str | None): Filter out sequences that do not have this string as the start of the externalId
- metadata (dict[str, str] | None): Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}.
- asset_ids (typing.Sequence[int] | None): Filter out sequences that are not linked to any of these assets.
- asset_subtree_ids (int | typing.Sequence[int] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- data_set_ids (int | typing.Sequence[int] | None): Return only sequences in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only sequences in the specified data set(s) with this external id / these external ids.
- created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- limit (int | None): Max number of sequences to return. Defaults to return all items.
- advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
- sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+ chunk_size: Number of sequences to return in each chunk. Defaults to yielding one sequence at a time.
+ name: Filter out sequences that do not have this *exact* name.
+ external_id_prefix: Filter out sequences that do not have this string as the start of the externalId
+ metadata: Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}.
+ asset_ids: Filter out sequences that are not linked to any of these assets.
+ asset_subtree_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids: Return only sequences in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only sequences in the specified data set(s) with this external id / these external ids.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ limit: Max number of sequences to return. Defaults to return all items.
+ advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
+ sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
Yields:
- Sequence | SequenceList: yields Sequence one by one if chunk_size is not specified, else SequenceList objects.
+ yields Sequence one by one if chunk_size is not specified, else SequenceList objects.
""" # noqa: DOC404
asset_subtree_ids_processed = process_asset_subtree_ids(asset_subtree_ids, asset_subtree_external_ids)
data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids)
@@ -179,11 +179,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None)
"""`Retrieve a single sequence by id. `_
Args:
- id (int | None): ID
- external_id (str | None): External ID
+ id: ID
+ external_id: External ID
Returns:
- Sequence | None: Requested sequence or None if it does not exist.
+ Requested sequence or None if it does not exist.
Examples:
@@ -210,12 +210,12 @@ async def retrieve_multiple(
"""`Retrieve multiple sequences by id. `_
Args:
- ids (typing.Sequence[int] | None): IDs
- external_ids (SequenceNotStr[str] | None): External IDs
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ ids: IDs
+ external_ids: External IDs
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Returns:
- SequenceList: The requested sequences.
+ The requested sequences.
Examples:
@@ -243,11 +243,11 @@ async def aggregate_count(
"""`Count of sequences matching the specified filters and search. `_
Args:
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count.
- filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down sequences to count requiring exact match.
+ advanced_filter: The filter to narrow down the sequences to count.
+ filter: The filter to narrow down sequences to count requiring exact match.
Returns:
- int: The number of sequences matching the specified filters and search.
+ The number of sequences matching the specified filters and search.
Examples:
@@ -284,13 +284,13 @@ async def aggregate_cardinality_values(
"""`Find approximate property count for sequences. `_
Args:
- property (SequenceProperty | str | list[str]): The property to count the cardinality of.
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match.
+ property: The property to count the cardinality of.
+ advanced_filter: The filter to narrow down the sequences to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the sequences to count requiring exact match.
Returns:
- int: The number of properties matching the specified filters and search.
+ The number of properties matching the specified filters and search.
Examples:
@@ -334,13 +334,13 @@ async def aggregate_cardinality_properties(
"""`Find approximate paths count for sequences. `_
Args:
- path (SequenceProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match.
+ path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+ advanced_filter: The filter to narrow down the sequences to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the sequences to count requiring exact match.
Returns:
- int: The number of properties matching the specified filters and search.
+ The number of properties matching the specified filters and search.
Examples:
@@ -372,13 +372,13 @@ async def aggregate_unique_values(
"""`Get unique paths with counts for sequences. `_
Args:
- property (SequenceProperty | str | list[str]): The property to group by.
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match.
+ property: The property to group by.
+ advanced_filter: The filter to narrow down the sequences to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the sequences to count requiring exact match.
Returns:
- UniqueResultList: List of unique values of sequences matching the specified filters and search.
+ List of unique values of sequences matching the specified filters and search.
Examples:
@@ -440,13 +440,13 @@ async def aggregate_unique_properties(
"""`Find approximate unique sequence properties. `_
Args:
- path (SequenceProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match.
+ path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+ advanced_filter: The filter to narrow down the sequences to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the sequences to count requiring exact match.
Returns:
- UniqueResultList: List of unique values of sequences matching the specified filters and search.
+ List of unique values of sequences matching the specified filters and search.
Examples:
@@ -480,10 +480,10 @@ async def create(
"""`Create one or more sequences. `_
Args:
- sequence (Sequence | SequenceWrite | typing.Sequence[Sequence] | typing.Sequence[SequenceWrite]): Sequence or list of Sequence to create. The Sequence columns parameter is a list of objects with fields `externalId` (external id of the column, when omitted, they will be given ids of 'column0, column1, ...'), `valueType` (data type of the column, either STRING, LONG, or DOUBLE, with default DOUBLE), `name`, `description`, `metadata` (optional fields to describe and store information about the data in the column). Other fields will be removed automatically, so a columns definition from a different sequence object can be passed here.
+ sequence: Sequence or list of Sequence to create. The Sequence columns parameter is a list of objects with fields `externalId` (external id of the column, when omitted, they will be given ids of 'column0, column1, ...'), `valueType` (data type of the column, either STRING, LONG, or DOUBLE, with default DOUBLE), `name`, `description`, `metadata` (optional fields to describe and store information about the data in the column). Other fields will be removed automatically, so a columns definition from a different sequence object can be passed here.
Returns:
- Sequence | SequenceList: The created sequence(s).
+ The created sequence(s).
Examples:
@@ -519,9 +519,9 @@ async def delete(
"""`Delete one or more sequences. `_
Args:
- id (int | typing.Sequence[int] | None): Id or list of ids
- external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ id: Id or list of ids
+ external_id: External ID or list of external ids
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Examples:
@@ -560,11 +560,11 @@ async def update(
"""`Update one or more sequences. `_
Args:
- item (Sequence | SequenceWrite | SequenceUpdate | typing.Sequence[Sequence | SequenceWrite | SequenceUpdate]): Sequences to update
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Sequence or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: Sequences to update
+ mode: How to update data when a non-update object is given (Sequence or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- Sequence | SequenceList: Updated sequences.
+ Updated sequences.
Examples:
@@ -662,11 +662,11 @@ async def upsert(
For more details, see :ref:`appendix-upsert`.
Args:
- item (Sequence | SequenceWrite | typing.Sequence[Sequence | SequenceWrite]): Sequence or list of sequences to upsert.
- mode (Literal['patch', 'replace']): Whether to patch or replace in the case the sequences are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
+ item: Sequence or list of sequences to upsert.
+ mode: Whether to patch or replace in the case the sequences are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
Returns:
- Sequence | SequenceList: The upserted sequence(s).
+ The upserted sequence(s).
Examples:
@@ -795,14 +795,14 @@ async def search(
Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required.
Args:
- name (str | None): Prefix and fuzzy search on name.
- description (str | None): Prefix and fuzzy search on description.
- query (str | None): Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other'
- filter (SequenceFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields.
- limit (int): Max number of results to return.
+ name: Prefix and fuzzy search on name.
+ description: Prefix and fuzzy search on description.
+ query: Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other'
+ filter: Filter to apply. Performs exact match on these fields.
+ limit: Max number of results to return.
Returns:
- SequenceList: The search result as a SequenceList
+ The search result as a SequenceList
Examples:
@@ -843,23 +843,23 @@ async def list(
"""`List sequences `_
Args:
- name (str | None): Filter out sequences that do not have this *exact* name.
- external_id_prefix (str | None): Filter out sequences that do not have this string as the start of the externalId
- metadata (dict[str, str] | None): Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}.
- asset_ids (typing.Sequence[int] | None): Filter out sequences that are not linked to any of these assets.
- asset_subtree_ids (int | typing.Sequence[int] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- data_set_ids (int | typing.Sequence[int] | None): Return only sequences in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only sequences in the specified data set(s) with this external id / these external ids.
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- limit (int | None): Max number of sequences to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
- partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
- advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage.
- sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+ name: Filter out sequences that do not have this *exact* name.
+ external_id_prefix: Filter out sequences that do not have this string as the start of the externalId
+ metadata: Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}.
+ asset_ids: Filter out sequences that are not linked to any of these assets.
+ asset_subtree_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids: Return only sequences in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only sequences in the specified data set(s) with this external id / these external ids.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ limit: Max number of sequences to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
+ advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage.
+ sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
Returns:
- SequenceList: The requested sequences.
+ The requested sequences.
.. note::
When using `partitions`, there are few considerations to keep in mind:
diff --git a/cognite/client/_api/simulators/__init__.py b/cognite/client/_api/simulators/__init__.py
index 3886397c0b..c377825a77 100644
--- a/cognite/client/_api/simulators/__init__.py
+++ b/cognite/client/_api/simulators/__init__.py
@@ -46,11 +46,11 @@ async def __call__(
Fetches simulators as they are iterated over, so you keep a limited number of simulators in memory.
Args:
- chunk_size (int | None): Number of simulators to return in each chunk. Defaults to yielding one simulator a time.
- limit (int | None): Maximum number of simulators to return. Defaults to return all items.
+ chunk_size: Number of simulators to return in each chunk. Defaults to yielding one simulator at a time.
+ limit: Maximum number of simulators to return. Defaults to return all items.
Yields:
- Simulator | SimulatorList: yields Simulator one by one if chunk is not specified, else SimulatorList objects.
+ yields Simulator one by one if chunk_size is not specified, else SimulatorList objects.
""" # noqa: DOC404
async for item in self._list_generator(
list_cls=SimulatorList,
@@ -65,10 +65,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SimulatorList:
"""`List all simulators `_
Args:
- limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items.
+ limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- SimulatorList: List of simulators
+ List of simulators
Examples:
List simulators:
diff --git a/cognite/client/_api/simulators/integrations.py b/cognite/client/_api/simulators/integrations.py
index 93dcb515dd..4fd0371e6b 100644
--- a/cognite/client/_api/simulators/integrations.py
+++ b/cognite/client/_api/simulators/integrations.py
@@ -56,13 +56,13 @@ async def __call__(
Fetches simulator integrations as they are iterated over, so you keep a limited number of simulator integrations in memory.
Args:
- chunk_size (int | None): Number of simulator integrations to return in each chunk. Defaults to yielding one simulator integration a time.
- simulator_external_ids (str | SequenceNotStr[str] | None): Filter on simulator external ids.
- active (bool | None): Filter on active status of the simulator integration.
- limit (int | None): The maximum number of simulator integrations to return, pass None to return all.
+ chunk_size: Number of simulator integrations to return in each chunk. Defaults to yielding one simulator integration at a time.
+ simulator_external_ids: Filter on simulator external ids.
+ active: Filter on active status of the simulator integration.
+ limit: The maximum number of simulator integrations to return, pass None to return all.
Yields:
- SimulatorIntegration | SimulatorIntegrationList: yields SimulatorIntegration one by one if chunk_size is not specified, else SimulatorIntegrationList objects.
+ yields SimulatorIntegration one by one if chunk_size is not specified, else SimulatorIntegrationList objects.
""" # noqa: DOC404
integrations_filter = SimulatorIntegrationFilter(simulator_external_ids=simulator_external_ids, active=active)
async for item in self._list_generator(
@@ -86,12 +86,12 @@ async def list(
Retrieves a list of simulator integrations that match the given criteria.
Args:
- limit (int | None): The maximum number of simulator integrations to return, pass None to return all.
- simulator_external_ids (str | SequenceNotStr[str] | None): Filter on simulator external ids.
- active (bool | None): Filter on active status of the simulator integration.
+ limit: The maximum number of simulator integrations to return, pass None to return all.
+ simulator_external_ids: Filter on simulator external ids.
+ active: Filter on active status of the simulator integration.
Returns:
- SimulatorIntegrationList: List of simulator integrations
+ List of simulator integrations
Examples:
List a few simulator integrations:
@@ -128,8 +128,8 @@ async def delete(
"""`Delete simulator integrations `_
Args:
- ids (int | Sequence[int] | None): Id(s) of simulator integrations to delete
- external_ids (str | SequenceNotStr[str] | None): External_id(s) of simulator integrations to delete
+ ids: Id(s) of simulator integrations to delete
+ external_ids: External_id(s) of simulator integrations to delete
Examples:
Delete simulator integrations by id or external id:
diff --git a/cognite/client/_api/simulators/logs.py b/cognite/client/_api/simulators/logs.py
index 12fc4a4516..37d011c659 100644
--- a/cognite/client/_api/simulators/logs.py
+++ b/cognite/client/_api/simulators/logs.py
@@ -41,10 +41,10 @@ async def retrieve(self, ids: int | Sequence[int]) -> SimulatorLogList | Simulat
They help users identify issues, diagnose problems, and gain insights into the behavior of the simulator integrations.
Args:
- ids (int | Sequence[int]): The ids of the simulator log.
+ ids: The ids of the simulator log.
Returns:
- SimulatorLogList | SimulatorLog | None: Requested simulator log(s)
+ Requested simulator log(s)
Examples:
Get simulator logs by simulator model id:
diff --git a/cognite/client/_api/simulators/models.py b/cognite/client/_api/simulators/models.py
index 8acba1e0df..6dcc1513d7 100644
--- a/cognite/client/_api/simulators/models.py
+++ b/cognite/client/_api/simulators/models.py
@@ -46,12 +46,12 @@ async def list(
Retrieves a list of simulator models that match the given criteria.
Args:
- limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items.
- simulator_external_ids (str | SequenceNotStr[str] | None): Filter by simulator external id(s).
- sort (PropertySort | None): The criteria to sort by.
+ limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ simulator_external_ids: Filter by simulator external id(s).
+ sort: The criteria to sort by.
Returns:
- SimulatorModelList: List of simulator models
+ List of simulator models
Examples:
List simulator models:
@@ -109,11 +109,11 @@ async def retrieve(
Retrieve one or more simulator models by ID(s) or external ID(s).
Args:
- ids (int | Sequence[int] | None): The id of the simulator model(s).
- external_ids (str | SequenceNotStr[str] | None): The external id of the simulator model(s).
+ ids: The id of the simulator model(s).
+ external_ids: The external id of the simulator model(s).
Returns:
- SimulatorModel | SimulatorModelList | None: Requested simulator model(s)
+ Requested simulator model(s)
Examples:
Get simulator model by id:
@@ -171,13 +171,13 @@ async def __call__(
Fetches simulator models as they are iterated over, so you keep a limited number of simulator models in memory.
Args:
- chunk_size (int | None): Number of simulator models to return in each chunk. Defaults to yielding one simulator model a time.
- simulator_external_ids (str | SequenceNotStr[str] | None): Filter by simulator external id(s).
- sort (PropertySort | None): The criteria to sort by.
- limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items.
+ chunk_size: Number of simulator models to return in each chunk. Defaults to yielding one simulator model at a time.
+ simulator_external_ids: Filter by simulator external id(s).
+ sort: The criteria to sort by.
+ limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Yields:
- SimulatorModel | SimulatorModelList: yields SimulatorModel one by one if chunk is not specified, else SimulatorModelList objects.
+ yields SimulatorModel one by one if chunk_size is not specified, else SimulatorModelList objects.
""" # noqa: DOC404
model_filter = SimulatorModelsFilter(simulator_external_ids=simulator_external_ids)
async for item in self._list_generator(
@@ -203,10 +203,10 @@ async def create(
"""`Create simulator models `_
Args:
- items (SimulatorModelWrite | Sequence[SimulatorModelWrite]): The model(s) to create.
+ items: The model(s) to create.
Returns:
- SimulatorModel | SimulatorModelList: Created simulator model(s)
+ Created simulator model(s)
Examples:
Create new simulator models:
@@ -244,8 +244,8 @@ async def delete(
"""`Delete simulator models `_
Args:
- ids (int | Sequence[int] | None): id (or sequence of ids) for the model(s) to delete.
- external_ids (str | SequenceNotStr[str] | None): external id (or sequence of external ids) for the model(s) to delete.
+ ids: id (or sequence of ids) for the model(s) to delete.
+ external_ids: external id (or sequence of external ids) for the model(s) to delete.
Examples:
Delete simulator models by id or external id:
@@ -282,10 +282,10 @@ async def update(
"""`Update simulator models `_
Args:
- items (SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate | Sequence[SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate]): The model to update.
+ items: The model to update.
Returns:
- SimulatorModel | SimulatorModelList: Updated simulator model(s)
+ Updated simulator model(s)
Examples:
Update a simulator model that you have fetched. This will perform a full update of the model:
diff --git a/cognite/client/_api/simulators/models_revisions.py b/cognite/client/_api/simulators/models_revisions.py
index 5c6e3ae479..fdde26b4d2 100644
--- a/cognite/client/_api/simulators/models_revisions.py
+++ b/cognite/client/_api/simulators/models_revisions.py
@@ -50,15 +50,15 @@ async def list(
Retrieves a list of simulator model revisions that match the given criteria.
Args:
- limit (int): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items.
- sort (PropertySort | None): The criteria to sort by.
- model_external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator models to filter by.
- all_versions (bool | None): If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned.
- created_time (TimestampRange | None): Filter by created time.
- last_updated_time (TimestampRange | None): Filter by last updated time.
+ limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ sort: The criteria to sort by.
+ model_external_ids: The external ids of the simulator models to filter by.
+ all_versions: If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned.
+ created_time: Filter by created time.
+ last_updated_time: Filter by last updated time.
Returns:
- SimulatorModelRevisionList: List of simulator model revisions
+ List of simulator model revisions
Examples:
List simulator model revisions:
@@ -118,11 +118,11 @@ async def retrieve(
Retrieve one or more simulator model revisions by ID(s) or external ID(s).
Args:
- ids (int | Sequence[int] | None): The ids of the simulator model revisions.
- external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator model revisions.
+ ids: The ids of the simulator model revisions.
+ external_ids: The external ids of the simulator model revisions.
Returns:
- SimulatorModelRevision | SimulatorModelRevisionList | None: Requested simulator model revision(s)
+ Requested simulator model revision(s)
Examples:
Get simulator model revision by id:
@@ -191,16 +191,16 @@ async def __call__(
Fetches simulator model revisions as they are iterated over, so you keep a limited number of simulator model revisions in memory.
Args:
- chunk_size (int | None): Number of simulator model revisions to return in each chunk. Defaults to yielding one simulator model revision a time.
- sort (PropertySort | None): The criteria to sort by.
- model_external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator models to filter by.
- all_versions (bool | None): If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned.
- created_time (TimestampRange | None): Filter by created time.
- last_updated_time (TimestampRange | None): Filter by last updated time.
- limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items.
+ chunk_size: Number of simulator model revisions to return in each chunk. Defaults to yielding one simulator model revision at a time.
+ sort: The criteria to sort by.
+ model_external_ids: The external ids of the simulator models to filter by.
+ all_versions: If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned.
+ created_time: Filter by created time.
+ last_updated_time: Filter by last updated time.
+ limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Yields:
- SimulatorModelRevision | SimulatorModelRevisionList: yields SimulatorModelRevision one by one if chunk is not specified, else SimulatorModelRevisionList objects.
+ yields SimulatorModelRevision one by one if chunk_size is not specified, else SimulatorModelRevisionList objects.
""" # noqa: DOC404
model_revisions_filter = SimulatorModelRevisionsFilter(
model_external_ids=model_external_ids,
@@ -231,10 +231,10 @@ async def create(
"""`Create simulator model revisions `_
Args:
- items (SimulatorModelRevisionWrite | Sequence[SimulatorModelRevisionWrite]): The model revision(s) to create.
+ items: The model revision(s) to create.
Returns:
- SimulatorModelRevision | SimulatorModelRevisionList: Created simulator model revision(s)
+ Created simulator model revision(s)
Examples:
Create new simulator model revisions:
@@ -280,9 +280,9 @@ async def retrieve_data(self, model_revision_external_id: str) -> SimulatorModel
Retrieves a list of simulator model revisions data that match the given criteria.
Args:
- model_revision_external_id (str): The external id of the simulator model revision to filter by.
+ model_revision_external_id: The external id of the simulator model revision to filter by.
Returns:
- SimulatorModelRevisionDataList: List of simulator model revision data
+ List of simulator model revision data
Examples:
List simulator model revision data:
diff --git a/cognite/client/_api/simulators/routine_revisions.py b/cognite/client/_api/simulators/routine_revisions.py
index e8f8a16f4c..7b7a980404 100644
--- a/cognite/client/_api/simulators/routine_revisions.py
+++ b/cognite/client/_api/simulators/routine_revisions.py
@@ -84,20 +84,20 @@ async def __call__(
Fetches simulator routine revisions as they are iterated over, so you keep a limited number of simulator routine revisions in memory.
Args:
- chunk_size (int | None): Number of simulator routine revisions to return in each chunk. Defaults to yielding one simulator routine revision a time.
- routine_external_ids (SequenceNotStr[str] | None): Filter on routine external ids.
- model_external_ids (SequenceNotStr[str] | None): Filter on model external ids.
- simulator_integration_external_ids (SequenceNotStr[str] | None): Filter on simulator integration external ids.
- simulator_external_ids (SequenceNotStr[str] | None): Filter on simulator external ids.
- kind (Literal['long'] | None): Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' set to 'True' in the same query.
- created_time (TimestampRange | None): Filter on created time.
- all_versions (bool): If all versions of the routine should be returned. Defaults to false which only returns the latest version.
- include_all_fields (bool): If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response.
- limit (int | None): Maximum number of simulator routine revisions to return. Defaults to return all items.
- sort (PropertySort | None): The criteria to sort by.
+ chunk_size: Number of simulator routine revisions to return in each chunk. Defaults to yielding one simulator routine revision at a time.
+ routine_external_ids: Filter on routine external ids.
+ model_external_ids: Filter on model external ids.
+ simulator_integration_external_ids: Filter on simulator integration external ids.
+ simulator_external_ids: Filter on simulator external ids.
+ kind: Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' is set to 'True' in the same query.
+ created_time: Filter on created time.
+ all_versions: If all versions of the routine should be returned. Defaults to false which only returns the latest version.
+ include_all_fields: If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response.
+ limit: Maximum number of simulator routine revisions to return. Defaults to return all items.
+ sort: The criteria to sort by.
Yields:
- SimulatorRoutineRevision | SimulatorRoutineRevisionList: yields SimulatorRoutineRevision one by one if chunk is not specified, else SimulatorRoutineRevisionList objects.
+ yields SimulatorRoutineRevision one by one if chunk_size is not specified, else SimulatorRoutineRevisionList objects.
""" # noqa: DOC404
self._warning.warn()
filter = SimulatorRoutineRevisionsFilter(
@@ -145,11 +145,11 @@ async def retrieve(
Retrieve simulator routine revisions by ID or External Id.
Args:
- ids (int | Sequence[int] | None): Simulator routine revision ID or list of IDs
- external_ids (str | SequenceNotStr[str] | None): Simulator routine revision External ID or list of external IDs
+ ids: Simulator routine revision ID or list of IDs
+ external_ids: Simulator routine revision External ID or list of external IDs
Returns:
- SimulatorRoutineRevision | SimulatorRoutineRevisionList | None: Requested simulator routine revision
+ Requested simulator routine revision
Examples:
Get simulator routine revision by id:
@@ -183,10 +183,10 @@ async def create(
"""`Create simulator routine revisions `_
Args:
- items (SimulatorRoutineRevisionWrite | Sequence[SimulatorRoutineRevisionWrite]): Simulator routine revisions to create.
+ items: Simulator routine revisions to create.
Returns:
- SimulatorRoutineRevision | SimulatorRoutineRevisionList: Created simulator routine revision(s)
+ Created simulator routine revision(s)
Examples:
Create new simulator routine revisions:
@@ -325,19 +325,19 @@ async def list(
Retrieves a list of simulator routine revisions that match the given criteria.
Args:
- routine_external_ids (SequenceNotStr[str] | None): Filter on routine external ids.
- model_external_ids (SequenceNotStr[str] | None): Filter on model external ids.
- simulator_integration_external_ids (SequenceNotStr[str] | None): Filter on simulator integration external ids.
- simulator_external_ids (SequenceNotStr[str] | None): Filter on simulator external ids.
- kind (Literal['long'] | None): Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' set to 'True' in the same query.
- created_time (TimestampRange | None): Filter on created time.
- all_versions (bool): If all versions of the routine should be returned. Defaults to false which only returns the latest version.
- include_all_fields (bool): If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response.
- limit (int | None): Maximum number of simulator routine revisions to return. Defaults to return all items.
- sort (PropertySort | None): The criteria to sort by.
+ routine_external_ids: Filter on routine external ids.
+ model_external_ids: Filter on model external ids.
+ simulator_integration_external_ids: Filter on simulator integration external ids.
+ simulator_external_ids: Filter on simulator external ids.
+ kind: Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' is set to 'True' in the same query.
+ created_time: Filter on created time.
+ all_versions: If all versions of the routine should be returned. Defaults to false which only returns the latest version.
+ include_all_fields: If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response.
+ limit: Maximum number of simulator routine revisions to return. Defaults to return all items.
+ sort: The criteria to sort by.
Returns:
- SimulatorRoutineRevisionList: List of simulator routine revisions
+ List of simulator routine revisions
Examples:
List simulator routine revisions:
diff --git a/cognite/client/_api/simulators/routines.py b/cognite/client/_api/simulators/routines.py
index 08cb66174f..7e04419465 100644
--- a/cognite/client/_api/simulators/routines.py
+++ b/cognite/client/_api/simulators/routines.py
@@ -71,14 +71,14 @@ async def __call__(
Fetches simulator routines as they are iterated over, so you keep a limited number of simulator routines in memory.
Args:
- chunk_size (int | None): Number of simulator routines to return in each chunk. Defaults to yielding one simulator routine a time.
- model_external_ids (Sequence[str] | None): Filter on model external ids.
- simulator_integration_external_ids (Sequence[str] | None): Filter on simulator integration external ids.
- sort (PropertySort | None): The criteria to sort by.
- limit (int | None): Maximum number of simulator routines to return. Defaults to return all items.
+ chunk_size: Number of simulator routines to return in each chunk. Defaults to yielding one simulator routine at a time.
+ model_external_ids: Filter on model external ids.
+ simulator_integration_external_ids: Filter on simulator integration external ids.
+ sort: The criteria to sort by.
+ limit: Maximum number of simulator routines to return. Defaults to return all items.
Yields:
- SimulatorRoutine | SimulatorRoutineList: yields SimulatorRoutine one by one if chunk is not specified, else SimulatorRoutineList objects.
+ yields SimulatorRoutine one by one if chunk_size is not specified, else SimulatorRoutineList objects.
""" # noqa: DOC404
self._warning.warn()
routines_filter = SimulatorRoutinesFilter(
@@ -109,10 +109,10 @@ async def create(
"""`Create simulator routines `_
Args:
- routine (SimulatorRoutineWrite | Sequence[SimulatorRoutineWrite]): Simulator routine(s) to create.
+ routine: Simulator routine(s) to create.
Returns:
- SimulatorRoutine | SimulatorRoutineList: Created simulator routine(s)
+ Created simulator routine(s)
Examples:
Create new simulator routines:
@@ -156,8 +156,8 @@ async def delete(
"""`Delete simulator routines `_
Args:
- ids (int | Sequence[int] | None): ids (or sequence of ids) for the routine(s) to delete.
- external_ids (str | SequenceNotStr[str] | SequenceNotStr[str] | None): external ids (or sequence of external ids) for the routine(s) to delete.
+ ids: ids (or sequence of ids) for the routine(s) to delete.
+ external_ids: external ids (or sequence of external ids) for the routine(s) to delete.
Examples:
Delete simulator routines by id or external id:
@@ -185,14 +185,14 @@ async def list(
Retrieves a list of simulator routines that match the given criteria.
Args:
- limit (int): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items.
- model_external_ids (Sequence[str] | None): Filter on model external ids.
- simulator_integration_external_ids (Sequence[str] | None): Filter on simulator integration external ids.
- kind (Literal['long'] | None): Filter on routine kind.
- sort (PropertySort | None): The criteria to sort by.
+ limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ model_external_ids: Filter on model external ids.
+ simulator_integration_external_ids: Filter on simulator integration external ids.
+ kind: Filter on routine kind.
+ sort: The criteria to sort by.
Returns:
- SimulatorRoutineList: List of simulator routines
+ List of simulator routines
Examples:
List simulator routines:
@@ -285,21 +285,18 @@ async def run(
2. By routine revision external ID + model revision external ID
Args:
- routine_external_id (str | None): External id of the simulator routine to run.
- Cannot be specified together with routine_revision_external_id and model_revision_external_id.
- routine_revision_external_id (str | None): External id of the simulator routine revision to run.
- Must be specified together with model_revision_external_id.
- model_revision_external_id (str | None): External id of the simulator model revision.
- Must be specified together with routine_revision_external_id.
- inputs (Sequence[SimulationInputOverride] | None): List of input overrides
- run_time (int | None): Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling.
- queue (bool | None): Queue the simulation run when connector is down.
- log_severity (Literal['Debug', 'Information', 'Warning', 'Error'] | None): Override the minimum severity level for the simulation run logs. If not provided, the minimum severity is read from the connector logger configuration.
- wait (bool): Wait until the simulation run is finished. Defaults to True.
- timeout (float): Timeout in seconds for waiting for the simulation run to finish. Defaults to 60 seconds.
+ routine_external_id: External id of the simulator routine to run. Cannot be specified together with routine_revision_external_id and model_revision_external_id.
+ routine_revision_external_id: External id of the simulator routine revision to run. Must be specified together with model_revision_external_id.
+ model_revision_external_id: External id of the simulator model revision. Must be specified together with routine_revision_external_id.
+ inputs: List of input overrides
+ run_time: Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling.
+ queue: Queue the simulation run when connector is down.
+ log_severity: Override the minimum severity level for the simulation run logs. If not provided, the minimum severity is read from the connector logger configuration.
+ wait: Wait until the simulation run is finished. Defaults to True.
+ timeout: Timeout in seconds for waiting for the simulation run to finish. Defaults to 60 seconds.
Returns:
- SimulationRun: Created simulation run
+ Created simulation run
Examples:
Create new simulation run using routine external ID:
diff --git a/cognite/client/_api/simulators/runs.py b/cognite/client/_api/simulators/runs.py
index 57e6309616..345cde45c7 100644
--- a/cognite/client/_api/simulators/runs.py
+++ b/cognite/client/_api/simulators/runs.py
@@ -99,22 +99,22 @@ async def __call__(
Fetches simulation runs as they are iterated over, so you keep a limited number of simulation runs in memory.
Args:
- chunk_size (int | None): Number of simulation runs to return in each chunk. Defaults to yielding one simulation run a time.
- limit (int | None): The maximum number of simulation runs to return, pass None to return all.
- status (str | None): Filter by simulation run status
- run_type (str | None): Filter by simulation run type
- model_external_ids (SequenceNotStr[str] | None): Filter by simulator model external ids
- simulator_integration_external_ids (SequenceNotStr[str] | None): Filter by simulator integration external ids
- simulator_external_ids (SequenceNotStr[str] | None): Filter by simulator external ids
- routine_external_ids (SequenceNotStr[str] | None): Filter by routine external ids
- routine_revision_external_ids (SequenceNotStr[str] | None): Filter by routine revision external ids
- model_revision_external_ids (SequenceNotStr[str] | None): Filter by model revision external ids
- created_time (TimestampRange | None): Filter by created time
- simulation_time (TimestampRange | None): Filter by simulation time
- sort (SimulationRunsSort | None): The criteria to sort by.
+ chunk_size: Number of simulation runs to return in each chunk. Defaults to yielding one simulation run at a time.
+ limit: The maximum number of simulation runs to return, pass None to return all.
+ status: Filter by simulation run status
+ run_type: Filter by simulation run type
+ model_external_ids: Filter by simulator model external ids
+ simulator_integration_external_ids: Filter by simulator integration external ids
+ simulator_external_ids: Filter by simulator external ids
+ routine_external_ids: Filter by routine external ids
+ routine_revision_external_ids: Filter by routine revision external ids
+ model_revision_external_ids: Filter by model revision external ids
+ created_time: Filter by created time
+ simulation_time: Filter by simulation time
+ sort: The criteria to sort by.
Yields:
- SimulationRun | SimulationRunList: yields Simulation Run one by one if chunk is not specified, else SimulatorRunsList objects.
+ yields SimulationRun one by one if chunk_size is not specified, else SimulationRunList objects.
""" # noqa: DOC404
filter_runs = SimulatorRunsFilter(
status=status,
@@ -160,21 +160,21 @@ async def list(
Retrieves a list of simulation runs that match the given criteria.
Args:
- limit (int | None): The maximum number of simulation runs to return, pass None to return all.
- status (str | None): Filter by simulation run status
- run_type (str | None): Filter by simulation run type
- model_external_ids (SequenceNotStr[str] | None): Filter by simulator model external ids
- simulator_integration_external_ids (SequenceNotStr[str] | None): Filter by simulator integration external ids
- simulator_external_ids (SequenceNotStr[str] | None): Filter by simulator external ids
- routine_external_ids (SequenceNotStr[str] | None): Filter by routine external ids
- routine_revision_external_ids (SequenceNotStr[str] | None): Filter by routine revision external ids
- model_revision_external_ids (SequenceNotStr[str] | None): Filter by model revision external ids
- created_time (TimestampRange | None): Filter by created time
- simulation_time (TimestampRange | None): Filter by simulation time
- sort (SimulationRunsSort | None): The criteria to sort by.
+ limit: The maximum number of simulation runs to return, pass None to return all.
+ status: Filter by simulation run status
+ run_type: Filter by simulation run type
+ model_external_ids: Filter by simulator model external ids
+ simulator_integration_external_ids: Filter by simulator integration external ids
+ simulator_external_ids: Filter by simulator external ids
+ routine_external_ids: Filter by routine external ids
+ routine_revision_external_ids: Filter by routine revision external ids
+ model_revision_external_ids: Filter by model revision external ids
+ created_time: Filter by created time
+ simulation_time: Filter by simulation time
+ sort: The criteria to sort by.
Returns:
- SimulationRunList: List of simulation runs
+ List of simulation runs
Examples:
List simulation runs:
@@ -239,10 +239,10 @@ async def retrieve(
"""`Retrieve simulation runs by ID `_
Args:
- ids (int | Sequence[int]): The ID(s) of the simulation run(s) to retrieve.
+ ids: The ID(s) of the simulation run(s) to retrieve.
Returns:
- SimulationRun | SimulationRunList | None: The simulation run(s) with the given ID(s)
+ The simulation run(s) with the given ID(s)
Examples:
Retrieve a single simulation run by id:
@@ -272,10 +272,10 @@ async def create(
"""`Create simulation runs `_
Args:
- items (SimulationRunWrite | Sequence[SimulationRunWrite]): The simulation run(s) to execute.
+ items: The simulation run(s) to execute.
Returns:
- SimulationRun | SimulationRunList: Created simulation run(s)
+ Created simulation run(s)
Examples:
Create new simulation run:
@@ -311,10 +311,10 @@ async def list_run_data(
Retrieve data associated with a simulation run by ID.
Args:
- run_id (int): Simulation run id.
+ run_id: Simulation run id.
Returns:
- SimulationRunDataList: List of simulation run data
+ List of simulation run data
Examples:
Get simulation run data by run id:
diff --git a/cognite/client/_api/synthetic_time_series.py b/cognite/client/_api/synthetic_time_series.py
index 3adbb99d74..6e7e867edb 100644
--- a/cognite/client/_api/synthetic_time_series.py
+++ b/cognite/client/_api/synthetic_time_series.py
@@ -104,21 +104,19 @@ async def query(
You can read the guide to synthetic time series in our `documentation `_.
Args:
- expressions (str | sympy.Basic | Sequence[str] | Sequence[sympy.Basic]): Functions to be calculated. Supports both strings and sympy expressions. Strings can have either the API `ts{}` syntax, or contain variable names to be replaced using the `variables` parameter.
- start (int | str | datetime.datetime): Inclusive start.
- end (int | str | datetime.datetime): Exclusive end.
- limit (int | None): Number of datapoints per expression to retrieve.
- variables (Mapping[str | sympy.Symbol, str | NodeId | TimeSeries | TimeSeriesWrite] | None): An optional map of symbol replacements.
- aggregate (str | None): use this aggregate when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax.
- granularity (str | None): use this granularity with the aggregate.
- target_unit (str | None): use this target_unit when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax.
- target_unit_system (str | None): Same as target_unit, but with unit system (e.g. SI). Only one of target_unit and target_unit_system can be specified.
- timezone (str | datetime.timezone | ZoneInfo | None): The timezone to use when aggregating datapoints. For aggregates of granularity 'hour' and longer,
- which time zone should we align to. Align to the start of the hour, start of the day or start of the month. For time zones of type Region/Location,
- the aggregate duration can vary, typically due to daylight saving time. For time zones of type UTC+/-HH:MM, use increments of 15 minutes. Default: "UTC" (None)
+ expressions: Functions to be calculated. Supports both strings and sympy expressions. Strings can have either the API `ts{}` syntax, or contain variable names to be replaced using the `variables` parameter.
+ start: Inclusive start.
+ end: Exclusive end.
+ limit: Number of datapoints per expression to retrieve.
+ variables: An optional map of symbol replacements.
+ aggregate: use this aggregate when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax.
+ granularity: use this granularity with the aggregate.
+ target_unit: use this target_unit when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax.
+ target_unit_system: Same as target_unit, but with unit system (e.g. SI). Only one of target_unit and target_unit_system can be specified.
+ timezone: The timezone to use when aggregating datapoints. For aggregates of granularity 'hour' and longer, which time zone should we align to. Align to the start of the hour, start of the day or start of the month. For time zones of type Region/Location, the aggregate duration can vary, typically due to daylight saving time. For time zones of type UTC+/-HH:MM, use increments of 15 minutes. Default: "UTC" (None)
Returns:
- Datapoints | DatapointsList: A DatapointsList object containing the calculated data.
+ A DatapointsList object containing the calculated data.
Examples:
diff --git a/cognite/client/_api/three_d/asset_mapping.py b/cognite/client/_api/three_d/asset_mapping.py
index 4dc36de0ae..a12f01929c 100644
--- a/cognite/client/_api/three_d/asset_mapping.py
+++ b/cognite/client/_api/three_d/asset_mapping.py
@@ -33,15 +33,15 @@ async def list(
"""`List 3D node asset mappings. `_
Args:
- model_id (int): Id of the model.
- revision_id (int): Id of the revision.
- node_id (int | None): List only asset mappings associated with this node.
- asset_id (int | None): List only asset mappings associated with this asset.
- intersects_bounding_box (BoundingBox3D | None): If given, only return asset mappings for assets whose bounding box intersects with the given bounding box.
- limit (int | None): Maximum number of asset mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ model_id: Id of the model.
+ revision_id: Id of the revision.
+ node_id: List only asset mappings associated with this node.
+ asset_id: List only asset mappings associated with this asset.
+ intersects_bounding_box: If given, only return asset mappings for assets whose bounding box intersects with the given bounding box.
+ limit: Maximum number of asset mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- ThreeDAssetMappingList: The list of asset mappings.
+ The list of asset mappings.
Example:
@@ -97,12 +97,12 @@ async def create(
"""`Create 3d node asset mappings. `_
Args:
- model_id (int): Id of the model.
- revision_id (int): Id of the revision.
- asset_mapping (ThreeDAssetMapping | ThreeDAssetMappingWrite | Sequence[ThreeDAssetMapping] | Sequence[ThreeDAssetMappingWrite]): The asset mapping(s) to create.
+ model_id: Id of the model.
+ revision_id: Id of the revision.
+ asset_mapping: The asset mapping(s) to create.
Returns:
- ThreeDAssetMapping | ThreeDAssetMappingList: The created asset mapping(s).
+ The created asset mapping(s).
Example:
@@ -130,9 +130,9 @@ async def delete(
"""`Delete 3d node asset mappings. `_
Args:
- model_id (int): Id of the model.
- revision_id (int): Id of the revision.
- asset_mapping (ThreeDAssetMapping | Sequence[ThreeDAssetMapping]): The asset mapping(s) to delete.
+ model_id: Id of the model.
+ revision_id: Id of the revision.
+ asset_mapping: The asset mapping(s) to delete.
Example:
diff --git a/cognite/client/_api/three_d/files.py b/cognite/client/_api/three_d/files.py
index 0c7d8a2e68..73181b41cf 100644
--- a/cognite/client/_api/three_d/files.py
+++ b/cognite/client/_api/three_d/files.py
@@ -11,10 +11,10 @@ async def retrieve(self, id: int) -> bytes:
"""`Retrieve the contents of a 3d file by id. `_
Args:
- id (int): The id of the file to retrieve.
+ id: The id of the file to retrieve.
Returns:
- bytes: The contents of the file.
+ The contents of the file.
Example:
diff --git a/cognite/client/_api/three_d/models.py b/cognite/client/_api/three_d/models.py
index 64fc3f4a1c..80baf94d92 100644
--- a/cognite/client/_api/three_d/models.py
+++ b/cognite/client/_api/three_d/models.py
@@ -37,12 +37,12 @@ async def __call__(
Fetches 3d models as they are iterated over, so you keep a limited number of 3d models in memory.
Args:
- chunk_size (int | None): Number of 3d models to return in each chunk. Defaults to yielding one model a time.
- published (bool | None): Filter based on whether or not the model has published revisions.
- limit (int | None): Maximum number of 3d models to return. Defaults to return all items.
+ chunk_size: Number of 3d models to return in each chunk. Defaults to yielding one model at a time.
+ published: Filter based on whether or not the model has published revisions.
+ limit: Maximum number of 3d models to return. Defaults to return all items.
Yields:
- ThreeDModel | ThreeDModelList: yields ThreeDModel one by one if chunk is not specified, else ThreeDModelList objects.
+ yields ThreeDModel one by one if chunk_size is not specified, else ThreeDModelList objects.
""" # noqa: DOC404
async for item in self._list_generator(
list_cls=ThreeDModelList,
@@ -58,10 +58,10 @@ async def retrieve(self, id: int) -> ThreeDModel | None:
"""`Retrieve a 3d model by id `_
Args:
- id (int): Get the model with this id.
+ id: Get the model with this id.
Returns:
- ThreeDModel | None: The requested 3d model.
+ The requested 3d model.
Example:
@@ -78,11 +78,11 @@ async def list(self, published: bool | None = None, limit: int | None = DEFAULT_
"""`List 3d models. `_
Args:
- published (bool | None): Filter based on whether or not the model has published revisions.
- limit (int | None): Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ published: Filter based on whether or not the model has published revisions.
+ limit: Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- ThreeDModelList: The list of 3d models.
+ The list of 3d models.
Examples:
@@ -136,14 +136,12 @@ async def create(
"""`Create new 3d models. `_
Args:
- name (str | ThreeDModelWrite | SequenceNotStr[str | ThreeDModelWrite]): The name of the 3d model(s) or 3D
- model object to create. If a 3D model object is provided, the other arguments are ignored.
- data_set_id (int | None): The id of the dataset this 3D model belongs to.
- metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value.
- Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs.
+ name: The name of the 3d model(s) or 3D model object to create. If a 3D model object is provided, the other arguments are ignored.
+ data_set_id: The id of the dataset this 3D model belongs to.
+ metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs.
Returns:
- ThreeDModel | ThreeDModelList: The created 3d model(s).
+ The created 3d model(s).
Example:
@@ -196,11 +194,11 @@ async def update(
"""`Update 3d models. `_
Args:
- item (ThreeDModel | ThreeDModelUpdate | Sequence[ThreeDModel | ThreeDModelUpdate]): ThreeDModel(s) to update
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ThreeDModel or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: ThreeDModel(s) to update
+ mode: How to update data when a non-update object is given (ThreeDModel or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- ThreeDModel | ThreeDModelList: Updated ThreeDModel(s)
+ Updated ThreeDModel(s)
Examples:
@@ -234,7 +232,7 @@ async def delete(self, id: int | Sequence[int]) -> None:
"""`Delete 3d models. `_
Args:
- id (int | Sequence[int]): ID or list of IDs to delete.
+ id: ID or list of IDs to delete.
Example:
diff --git a/cognite/client/_api/three_d/revisions.py b/cognite/client/_api/three_d/revisions.py
index ead00ab562..5a147124f3 100644
--- a/cognite/client/_api/three_d/revisions.py
+++ b/cognite/client/_api/three_d/revisions.py
@@ -36,13 +36,13 @@ async def __call__(
Fetches 3d model revisions as they are iterated over, so you keep a limited number of 3d model revisions in memory.
Args:
- model_id (int): Iterate over revisions for the model with this id.
- chunk_size (int | None): Number of 3d model revisions to return in each chunk. Defaults to yielding one model a time.
- published (bool): Filter based on whether or not the revision has been published.
- limit (int | None): Maximum number of 3d model revisions to return. Defaults to return all items.
+ model_id: Iterate over revisions for the model with this id.
+ chunk_size: Number of 3d model revisions to return in each chunk. Defaults to yielding one model at a time.
+ published: Filter based on whether or not the revision has been published.
+ limit: Maximum number of 3d model revisions to return. Defaults to return all items.
Yields:
- ThreeDModelRevision | ThreeDModelRevisionList: yields ThreeDModelRevision one by one if chunk is not specified, else ThreeDModelRevisionList objects.
+ yields ThreeDModelRevision one by one if chunk_size is not specified, else ThreeDModelRevisionList objects.
""" # noqa: DOC404
async for item in self._list_generator(
list_cls=ThreeDModelRevisionList,
@@ -59,11 +59,11 @@ async def retrieve(self, model_id: int, id: int) -> ThreeDModelRevision | None:
"""`Retrieve a 3d model revision by id `_
Args:
- model_id (int): Get the revision under the model with this id.
- id (int): Get the model revision with this id.
+ model_id: Get the revision under the model with this id.
+ id: Get the model revision with this id.
Returns:
- ThreeDModelRevision | None: The requested 3d model revision.
+ The requested 3d model revision.
Example:
@@ -101,11 +101,11 @@ async def create(
"""`Create a revisions for a specified 3d model. `_
Args:
- model_id (int): Create revisions for this model.
- revision (ThreeDModelRevision | ThreeDModelRevisionWrite | Sequence[ThreeDModelRevision] | Sequence[ThreeDModelRevisionWrite]): The revision(s) to create.
+ model_id: Create revisions for this model.
+ revision: The revision(s) to create.
Returns:
- ThreeDModelRevision | ThreeDModelRevisionList: The created revision(s)
+ The created revision(s)
Example:
@@ -132,12 +132,12 @@ async def list(
"""`List 3d model revisions. `_
Args:
- model_id (int): List revisions under the model with this id.
- published (bool): Filter based on whether or not the revision is published.
- limit (int | None): Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ model_id: List revisions under the model with this id.
+ published: Filter based on whether or not the revision is published.
+ limit: Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- ThreeDModelRevisionList: The list of 3d model revisions.
+ The list of 3d model revisions.
Example:
@@ -168,12 +168,12 @@ async def update(
"""`Update 3d model revisions. `_
Args:
- model_id (int): Update the revision under the model with this id.
- item (ThreeDModelRevision | ThreeDModelRevisionUpdate | Sequence[ThreeDModelRevision | ThreeDModelRevisionUpdate]): ThreeDModelRevision(s) to update
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ThreeDModelRevision or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ model_id: Update the revision under the model with this id.
+ item: ThreeDModelRevision(s) to update
+ mode: How to update data when a non-update object is given (ThreeDModelRevision or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- ThreeDModelRevision | ThreeDModelRevisionList: Updated ThreeDModelRevision(s)
+ Updated ThreeDModelRevision(s)
Examples:
@@ -205,8 +205,8 @@ async def delete(self, model_id: int, id: int | Sequence[int]) -> None:
"""`Delete 3d model revisions. `_
Args:
- model_id (int): Delete the revision under the model with this id.
- id (int | Sequence[int]): ID or list of IDs to delete.
+ model_id: Delete the revision under the model with this id.
+ id: ID or list of IDs to delete.
Example:
@@ -227,9 +227,9 @@ async def update_thumbnail(self, model_id: int, revision_id: int, file_id: int)
"""`Update a revision thumbnail. `_
Args:
- model_id (int): Id of the model.
- revision_id (int): Id of the revision.
- file_id (int): Id of the thumbnail file in the Files API.
+ model_id: Id of the model.
+ revision_id: Id of the revision.
+ file_id: Id of the thumbnail file in the Files API.
Example:
@@ -259,16 +259,16 @@ async def list_nodes(
the resulting subtree with the 'depth' query parameter.
Args:
- model_id (int): Id of the model.
- revision_id (int): Id of the revision.
- node_id (int | None): ID of the root node of the subtree you request (default is the root node).
- depth (int | None): Get sub nodes up to this many levels below the specified node. Depth 0 is the root node.
- sort_by_node_id (bool): Returns the nodes in `nodeId` order.
- partitions (int | None): The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`.
- limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ model_id: Id of the model.
+ revision_id: Id of the revision.
+ node_id: ID of the root node of the subtree you request (default is the root node).
+ depth: Get sub nodes up to this many levels below the specified node. Depth 0 is the root node.
+ sort_by_node_id: Returns the nodes in `nodeId` order.
+ partitions: The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`.
+ limit: Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- ThreeDNodeList: The list of 3d nodes.
+ The list of 3d nodes.
Example:
@@ -302,14 +302,14 @@ async def filter_nodes(
"""`List nodes in a revision, filtered by node property values. `_
Args:
- model_id (int): Id of the model.
- revision_id (int): Id of the revision.
- properties (dict[str, dict[str, SequenceNotStr[str]]] | None): Properties for filtering. The object contains one or more category. Each category references one or more properties. Each property is associated with a list of values. For a node to satisfy the filter, it must, for each category/property in the filter, contain the category+property combination with a value that is contained within the corresponding list in the filter.
- limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
- partitions (int | None): The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`.
+ model_id: Id of the model.
+ revision_id: Id of the revision.
+ properties: Properties for filtering. The object contains one or more categories. Each category references one or more properties. Each property is associated with a list of values. For a node to satisfy the filter, it must, for each category/property in the filter, contain the category+property combination with a value that is contained within the corresponding list in the filter.
+ limit: Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ partitions: The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`.
Returns:
- ThreeDNodeList: The list of 3d nodes.
+ The list of 3d nodes.
Example:
@@ -337,13 +337,13 @@ async def list_ancestor_nodes(
"""`Retrieves a list of ancestor nodes of a given node, including itself, in the hierarchy of the 3D model `_
Args:
- model_id (int): Id of the model.
- revision_id (int): Id of the revision.
- node_id (int | None): ID of the node to get the ancestors of.
- limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ model_id: Id of the model.
+ revision_id: Id of the revision.
+ node_id: ID of the node to get the ancestors of.
+ limit: Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
Returns:
- ThreeDNodeList: The list of 3d nodes.
+ The list of 3d nodes.
Example:
diff --git a/cognite/client/_api/time_series.py b/cognite/client/_api/time_series.py
index cd929a3fde..93b337bf83 100644
--- a/cognite/client/_api/time_series.py
+++ b/cognite/client/_api/time_series.py
@@ -128,29 +128,29 @@ async def __call__(
Fetches time series as they are iterated over, so you keep a limited number of objects in memory.
Args:
- chunk_size (int | None): Number of time series to return in each chunk. Defaults to yielding one time series a time.
- name (str | None): Name of the time series. Often referred to as tag.
- unit (str | None): Unit of the time series.
- unit_external_id (str | None): Filter on unit external ID.
- unit_quantity (str | None): Filter on unit quantity.
- is_string (bool | None): Whether the time series is a string time series.
- is_step (bool | None): Whether the time series is a step (piecewise constant) time series.
- asset_ids (Sequence[int] | None): List time series related to these assets.
- asset_external_ids (SequenceNotStr[str] | None): List time series related to these assets.
- asset_subtree_ids (int | Sequence[int] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- data_set_ids (int | Sequence[int] | None): Return only time series in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only time series in the specified data set(s) with this external id / these external ids.
- metadata (dict[str, Any] | None): Custom, application specific metadata. String key -> String value
- external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
- created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- limit (int | None): Maximum number of time series to return. Defaults to return all items.
- advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
- sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+ chunk_size: Number of time series to return in each chunk. Defaults to yielding one time series at a time.
+ name: Name of the time series. Often referred to as tag.
+ unit: Unit of the time series.
+ unit_external_id: Filter on unit external ID.
+ unit_quantity: Filter on unit quantity.
+ is_string: Whether the time series is a string time series.
+ is_step: Whether the time series is a step (piecewise constant) time series.
+ asset_ids: List time series related to these assets.
+ asset_external_ids: List time series related to these assets.
+ asset_subtree_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids: Return only time series in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only time series in the specified data set(s) with this external id / these external ids.
+ metadata: Custom, application specific metadata. String key -> String value
+ external_id_prefix: Filter by this (case-sensitive) prefix for the external ID.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ limit: Maximum number of time series to return. Defaults to return all items.
+ advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
+ sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
Yields:
- TimeSeries | TimeSeriesList: yields TimeSeries one by one if chunk_size is not specified, else TimeSeriesList objects.
+ yields TimeSeries one by one if chunk_size is not specified, else TimeSeriesList objects.
""" # noqa: DOC404
asset_subtree_ids_processed = process_asset_subtree_ids(asset_subtree_ids, asset_subtree_external_ids)
data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids)
@@ -193,12 +193,12 @@ async def retrieve(
"""`Retrieve a single time series by id. `_
Args:
- id (int | None): ID
- external_id (str | None): External ID
- instance_id (NodeId | None): Instance ID
+ id: ID
+ external_id: External ID
+ instance_id: Instance ID
Returns:
- TimeSeries | None: Requested time series or None if it does not exist.
+ Requested time series or None if it does not exist.
Examples:
@@ -230,13 +230,13 @@ async def retrieve_multiple(
"""`Retrieve multiple time series by id. `_
Args:
- ids (Sequence[int] | None): IDs
- external_ids (SequenceNotStr[str] | None): External IDs
- instance_ids (Sequence[NodeId] | None): Instance IDs
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ ids: IDs
+ external_ids: External IDs
+ instance_ids: Instance IDs
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Returns:
- TimeSeriesList: The requested time series.
+ The requested time series.
Examples:
@@ -267,11 +267,11 @@ async def aggregate_count(
"""`Count of time series matching the specified filters and search. `_
Args:
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count.
- filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down time series to count requiring exact match.
+ advanced_filter: The filter to narrow down the time series to count.
+ filter: The filter to narrow down time series to count requiring exact match.
Returns:
- int: The number of time series matching the specified filters and search.
+ The number of time series matching the specified filters and search.
Examples:
@@ -307,12 +307,12 @@ async def aggregate_cardinality_values(
"""`Find approximate property count for time series. `_
Args:
- property (TimeSeriesProperty | str | list[str]): The property to count the cardinality of.
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match.
+ property: The property to count the cardinality of.
+ advanced_filter: The filter to narrow down the time series to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the time series to count requiring exact match.
Returns:
- int: The number of properties matching the specified filters and search.
+ The number of properties matching the specified filters and search.
Examples:
@@ -356,12 +356,12 @@ async def aggregate_cardinality_properties(
"""`Find approximate paths count for time series. `_
Args:
- path (TimeSeriesProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match.
+ path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+ advanced_filter: The filter to narrow down the time series to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the time series to count requiring exact match.
Returns:
- int: The number of properties matching the specified filters and search.
+ The number of properties matching the specified filters and search.
Examples:
@@ -392,13 +392,13 @@ async def aggregate_unique_values(
"""`Get unique properties with counts for time series. `_
Args:
- property (TimeSeriesProperty | str | list[str]): The property to group by.
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match.
+ property: The property to group by.
+ advanced_filter: The filter to narrow down the time series to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the time series to count requiring exact match.
Returns:
- UniqueResultList: List of unique values of time series matching the specified filters and search.
+ List of unique values of time series matching the specified filters and search.
Examples:
@@ -450,13 +450,13 @@ async def aggregate_unique_properties(
"""`Get unique paths with counts for time series. `_
Args:
- path (TimeSeriesProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality.
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets.
- filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match.
+ path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys).
+ advanced_filter: The filter to narrow down the time series to count cardinality.
+ aggregate_filter: The filter to apply to the resulting buckets.
+ filter: The filter to narrow down the time series to count requiring exact match.
Returns:
- UniqueResultList: List of unique values of time series matching the specified filters and search.
+ List of unique values of time series matching the specified filters and search.
Examples:
@@ -489,10 +489,10 @@ async def create(
"""`Create one or more time series. `_
Args:
- time_series (TimeSeries | TimeSeriesWrite | Sequence[TimeSeries] | Sequence[TimeSeriesWrite]): TimeSeries or list of TimeSeries to create.
+ time_series: TimeSeries or list of TimeSeries to create.
Returns:
- TimeSeries | TimeSeriesList: The created time series.
+ The created time series.
Examples:
@@ -520,9 +520,9 @@ async def delete(
"""`Delete one or more time series. `_
Args:
- id (int | Sequence[int] | None): Id or list of ids
- external_id (str | SequenceNotStr[str] | None): External ID or list of external ids
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ id: Id or list of ids
+ external_id: External ID or list of external ids
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Examples:
@@ -564,11 +564,11 @@ async def update(
"""`Update one or more time series. `_
Args:
- item (TimeSeries | TimeSeriesWrite | TimeSeriesUpdate | Sequence[TimeSeries | TimeSeriesWrite | TimeSeriesUpdate]): Time series to update
- mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (TimeSeries or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
+ item: Time series to update
+ mode: How to update data when a non-update object is given (TimeSeries or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`.
Returns:
- TimeSeries | TimeSeriesList: Updated time series.
+ Updated time series.
Examples:
@@ -629,11 +629,11 @@ async def upsert(
For more details, see :ref:`appendix-upsert`.
Args:
- item (TimeSeries | TimeSeriesWrite | Sequence[TimeSeries | TimeSeriesWrite]): TimeSeries or list of TimeSeries to upsert.
- mode (Literal['patch', 'replace']): Whether to patch or replace in the case the time series are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
+ item: TimeSeries or list of TimeSeries to upsert.
+ mode: Whether to patch or replace in the case the time series are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified.
Returns:
- TimeSeries | TimeSeriesList: The upserted time series(s).
+ The upserted time series.
Examples:
@@ -670,14 +670,14 @@ async def search(
Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required.
Args:
- name (str | None): Prefix and fuzzy search on name.
- description (str | None): Prefix and fuzzy search on description.
- query (str | None): Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other'
- filter (TimeSeriesFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields.
- limit (int): Max number of results to return.
+ name: Prefix and fuzzy search on name.
+ description: Prefix and fuzzy search on description.
+ query: Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other'
+ filter: Filter to apply. Performs exact match on these fields.
+ limit: Max number of results to return.
Returns:
- TimeSeriesList: List of requested time series.
+ List of requested time series.
Examples:
@@ -729,29 +729,29 @@ async def list(
"""`List time series `_
Args:
- name (str | None): Name of the time series. Often referred to as tag.
- unit (str | None): Unit of the time series.
- unit_external_id (str | None): Filter on unit external ID.
- unit_quantity (str | None): Filter on unit quantity.
- is_string (bool | None): Whether the time series is a string time series.
- is_step (bool | None): Whether the time series is a step (piecewise constant) time series.
- asset_ids (Sequence[int] | None): List time series related to these assets.
- asset_external_ids (SequenceNotStr[str] | None): List time series related to these assets.
- asset_subtree_ids (int | Sequence[int] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
- data_set_ids (int | Sequence[int] | None): Return only time series in the specified data set(s) with this id / these ids.
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only time series in the specified data set(s) with this external id / these external ids.
- metadata (dict[str, Any] | None): Custom, application specific metadata. String key -> String value
- external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID.
- created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
- partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
- limit (int | None): Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
- advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage.
- sort (SortSpec | list[SortSpec] | TimeSeriesProperty | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
+ name: Name of the time series. Often referred to as tag.
+ unit: Unit of the time series.
+ unit_external_id: Filter on unit external ID.
+ unit_quantity: Filter on unit quantity.
+ is_string: Whether the time series is a string time series.
+ is_step: Whether the time series is a step (piecewise constant) time series.
+ asset_ids: List time series related to these assets.
+ asset_external_ids: List time series related to these assets.
+ asset_subtree_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ asset_subtree_external_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned.
+ data_set_ids: Return only time series in the specified data set(s) with this id / these ids.
+ data_set_external_ids: Return only time series in the specified data set(s) with this external id / these external ids.
+ metadata: Custom, application specific metadata. String key -> String value
+ external_id_prefix: Filter by this (case-sensitive) prefix for the external ID.
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
+ partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`).
+ limit: Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items.
+ advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage.
+ sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
Returns:
- TimeSeriesList: The requested time series.
+ The requested time series.
.. note::
When using `partitions`, there are few considerations to keep in mind:
diff --git a/cognite/client/_api/transformations/__init__.py b/cognite/client/_api/transformations/__init__.py
index 9c98d98e70..914cf31de2 100644
--- a/cognite/client/_api/transformations/__init__.py
+++ b/cognite/client/_api/transformations/__init__.py
@@ -95,23 +95,23 @@ async def __call__(
"""Iterate over transformations
Args:
- chunk_size (int | None): Number of transformations to return in each chunk. Defaults to yielding one transformation a time.
- include_public (bool): Whether public transformations should be included in the results. (default true).
- name_regex (str | None): Regex expression to match the transformation name
- query_regex (str | None): Regex expression to match the transformation query
- destination_type (str | None): Transformation destination resource name to filter by.
- conflict_mode (str | None): Filters by a selected transformation action type: abort/create, upsert, update, delete
- cdf_project_name (str | None): Project name to filter by configured source and destination project
- has_blocked_error (bool | None): Whether only the blocked transformations should be included in the results.
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps
- data_set_ids (int | list[int] | None): Return only transformations in the specified data sets with these id(s).
- data_set_external_ids (str | list[str] | None): Return only transformations in the specified data sets with these external id(s).
- tags (TagsFilter | None): Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now.
- limit (int | None): Limits the number of results to be returned. Defaults to yielding all transformations.
+ chunk_size: Number of transformations to return in each chunk. Defaults to yielding one transformation a time.
+ include_public: Whether public transformations should be included in the results. (default true).
+ name_regex: Regex expression to match the transformation name
+ query_regex: Regex expression to match the transformation query
+ destination_type: Transformation destination resource name to filter by.
+ conflict_mode: Filters by a selected transformation action type: abort/create, upsert, update, delete
+ cdf_project_name: Project name to filter by configured source and destination project
+ has_blocked_error: Whether only the blocked transformations should be included in the results.
+ created_time: Range between two timestamps
+ last_updated_time: Range between two timestamps
+ data_set_ids: Return only transformations in the specified data sets with these id(s).
+ data_set_external_ids: Return only transformations in the specified data sets with these external id(s).
+ tags: Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now.
+ limit: Limits the number of results to be returned. Defaults to yielding all transformations.
Yields:
- Transformation | TransformationList: Yields transformations in chunks if chunk_size is specified, otherwise one transformation at a time.
+ Yields transformations in chunks if chunk_size is specified, otherwise one transformation at a time.
""" # noqa: DOC404
ds_ids = IdentifierSequence.load(data_set_ids, data_set_external_ids, id_name="data_set").as_dicts()
@@ -155,10 +155,10 @@ async def create(
"""`Create one or more transformations. `_
Args:
- transformation (Transformation | TransformationWrite | Sequence[Transformation] | Sequence[TransformationWrite]): Transformation or list of transformations to create.
+ transformation: Transformation or list of transformations to create.
Returns:
- Transformation | TransformationList: Created transformation(s)
+ Created transformation(s)
Examples:
@@ -257,9 +257,9 @@ async def delete(
"""`Delete one or more transformations. `_
Args:
- id (int | Sequence[int] | None): Id or list of ids.
- external_id (str | SequenceNotStr[str] | None): External ID or list of external ids.
- ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception.
+ id: Id or list of ids.
+ external_id: External ID or list of external ids.
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception.
Example:
@@ -295,22 +295,22 @@ async def list(
"""`List all transformations. `_
Args:
- include_public (bool): Whether public transformations should be included in the results. (default true).
- name_regex (str | None): Regex expression to match the transformation name
- query_regex (str | None): Regex expression to match the transformation query
- destination_type (str | None): Transformation destination resource name to filter by.
- conflict_mode (str | None): Filters by a selected transformation action type: abort/create, upsert, update, delete
- cdf_project_name (str | None): Project name to filter by configured source and destination project
- has_blocked_error (bool | None): Whether only the blocked transformations should be included in the results.
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps
- data_set_ids (int | list[int] | None): Return only transformations in the specified data sets with these id(s).
- data_set_external_ids (str | list[str] | None): Return only transformations in the specified data sets with these external id(s).
- tags (TagsFilter | None): Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now.
- limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25.
+ include_public: Whether public transformations should be included in the results. (default true).
+ name_regex: Regex expression to match the transformation name
+ query_regex: Regex expression to match the transformation query
+ destination_type: Transformation destination resource name to filter by.
+ conflict_mode: Filters by a selected transformation action type: abort/create, upsert, update, delete
+ cdf_project_name: Project name to filter by configured source and destination project
+ has_blocked_error: Whether only the blocked transformations should be included in the results.
+ created_time: Range between two timestamps
+ last_updated_time: Range between two timestamps
+ data_set_ids: Return only transformations in the specified data sets with these id(s).
+ data_set_external_ids: Return only transformations in the specified data sets with these external id(s).
+ tags: Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now.
+ limit: Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25.
Returns:
- TransformationList: List of transformations
+ List of transformations
Example:
@@ -350,11 +350,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None)
"""`Retrieve a single transformation by id. `_
Args:
- id (int | None): ID
- external_id (str | None): No description.
+ id: ID
+ external_id: External ID.
Returns:
- Transformation | None: Requested transformation or None if it does not exist.
+ Requested transformation or None if it does not exist.
Examples:
@@ -385,12 +385,12 @@ async def retrieve_multiple(
"""`Retrieve multiple transformations.