diff --git a/.gemini/styleguide.md b/.gemini/styleguide.md index f271f74cb4..13b38e856f 100644 --- a/.gemini/styleguide.md +++ b/.gemini/styleguide.md @@ -101,10 +101,10 @@ def render_header(header: str) -> str: Renders a (markdown) heading. Args: - header (str): header + header: header Returns: - str: The rendered header + The rendered header """ return f"{header}\n{'=' * len(header)}\n" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4ad3a87c28..876744fa7e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,3 +65,6 @@ repos: - --quiet # otherwise prints all checked files that are ok... - --skip-checking-raises=true - --check-class-attributes=false + - --arg-type-hints-in-docstring=False + - --check-return-types=False + - --check-yield-types=False \ No newline at end of file diff --git a/cognite/client/_api/agents/agents.py b/cognite/client/_api/agents/agents.py index 0a277bfbb7..28d633dd9d 100644 --- a/cognite/client/_api/agents/agents.py +++ b/cognite/client/_api/agents/agents.py @@ -40,10 +40,10 @@ async def upsert(self, agents: AgentUpsert | Sequence[AgentUpsert]) -> Agent | A """`Create or update (upsert) one or more agents. `_ Args: - agents (AgentUpsert | Sequence[AgentUpsert]): Agent or list of agents to create or update. + agents: Agent or list of agents to create or update. Returns: - Agent | AgentList: The created or updated agent(s). + The created or updated agent(s). Examples: @@ -178,11 +178,11 @@ async def retrieve( """`Retrieve one or more agents by external ID. `_ Args: - external_ids (str | SequenceNotStr[str]): The external id of the agent(s) to retrieve. - ignore_unknown_ids (bool): Whether to ignore unknown IDs. Defaults to False. + external_ids: The external id of the agent(s) to retrieve. + ignore_unknown_ids: Whether to ignore unknown IDs. Defaults to False. Returns: - Agent | AgentList | None: The requested agent or agent list. 
`None` is returned if `ignore_unknown_ids` is `True` and the external ID is not found. + The requested agent or agent list. `None` is returned if `ignore_unknown_ids` is `True` and the external ID is not found. Examples: @@ -210,8 +210,8 @@ async def delete(self, external_ids: str | SequenceNotStr[str], ignore_unknown_i """`Delete one or more agents. `_ Args: - external_ids (str | SequenceNotStr[str]): External ID of the agent or a list of external ids. - ignore_unknown_ids (bool): If `True`, the call will ignore unknown external IDs. Defaults to False. + external_ids: External ID of the agent or a list of external ids. + ignore_unknown_ids: If `True`, the call will ignore unknown external IDs. Defaults to False. Examples: @@ -234,7 +234,7 @@ async def list(self) -> AgentList: # The API does not yet support limit or pagi """`List agents. `_ Returns: - AgentList: The list of agents. + The list of agents. Examples: @@ -263,14 +263,13 @@ async def chat( Users can ensure conversation continuity by including the cursor from the previous response in subsequent requests. Args: - agent_external_id (str): External ID that uniquely identifies the agent. - messages (Message | ActionResult | Sequence[Message | ActionResult]): A list of one or many input messages to the agent. Can include regular messages and action results. - cursor (str | None): The cursor to use for continuation of a conversation. Use this to - create multi-turn conversations, as the cursor will keep track of the conversation state. - actions (Sequence[Action] | None): A list of client-side actions that can be called by the agent. + agent_external_id: External ID that uniquely identifies the agent. + messages: A list of one or many input messages to the agent. Can include regular messages and action results. + cursor: The cursor to use for continuation of a conversation. Use this to create multi-turn conversations, as the cursor will keep track of the conversation state. 
+ actions: A list of client-side actions that can be called by the agent. Returns: - AgentChatResponse: The response from the agent. + The response from the agent. Examples: diff --git a/cognite/client/_api/ai/tools/documents.py b/cognite/client/_api/ai/tools/documents.py index 9ac2f27ecf..d4a194e77b 100644 --- a/cognite/client/_api/ai/tools/documents.py +++ b/cognite/client/_api/ai/tools/documents.py @@ -25,12 +25,12 @@ async def summarize( this may be extended in the future. Args: - id (int | None): The ID of the document - external_id (str | None): The external ID of the document - instance_id (NodeId | None): The instance ID of the document + id: The ID of the document + external_id: The external ID of the document + instance_id: The instance ID of the document Returns: - Summary: A summary of the document. + A summary of the document. Examples: @@ -85,16 +85,16 @@ async def ask_question( Supports up to 100 documents at a time. Args: - question (str): The question. - id (int | Sequence[int] | None): The ID(s) of the document(s) - external_id (str | Sequence[str] | None): The external ID(s) of the document(s) - instance_id (NodeId | Sequence[NodeId] | None): The instance ID(s) of the document(s) - language (AnswerLanguage | Literal['Chinese', 'Dutch', 'English', 'French', 'German', 'Italian', 'Japanese', 'Korean', 'Latvian', 'Norwegian', 'Portuguese', 'Spanish', 'Swedish']): The desired language of the answer, defaults to English. - additional_context (str | None): Additional context that you want the LLM to take into account. - ignore_unknown_ids (bool): Whether to skip documents that do not exist or that are not fully processed, instead of throwing an error. If no valid documents are found, an error will always be raised. + question: The question. + id: The ID(s) of the document(s) + external_id: The external ID(s) of the document(s) + instance_id: The instance ID(s) of the document(s) + language: The desired language of the answer, defaults to English. 
+ additional_context: Additional context that you want the LLM to take into account. + ignore_unknown_ids: Whether to skip documents that do not exist or that are not fully processed, instead of throwing an error. If no valid documents are found, an error will always be raised. Returns: - Answer: The answer to the question in the form of a list of multiple content objects, each consisting of a chunk of text along with a set of references. + The answer to the question in the form of a list of multiple content objects, each consisting of a chunk of text along with a set of references. Examples: diff --git a/cognite/client/_api/annotations.py b/cognite/client/_api/annotations.py index c730e834b5..37ee9218d1 100644 --- a/cognite/client/_api/annotations.py +++ b/cognite/client/_api/annotations.py @@ -42,10 +42,10 @@ async def create( """`Create annotations `_ Args: - annotations (Annotation | AnnotationWrite | Sequence[Annotation | AnnotationWrite]): Annotation(s) to create + annotations: Annotation(s) to create Returns: - Annotation | AnnotationList: Created annotation(s) + Created annotation(s) """ assert_type(annotations, "annotations", [AnnotationCore, Sequence]) @@ -69,10 +69,10 @@ async def suggest( """`Suggest annotations `_ Args: - annotations (Annotation | AnnotationWrite | Sequence[Annotation] | Sequence[AnnotationWrite]): annotation(s) to suggest. They must have status set to "suggested". + annotations: annotation(s) to suggest. They must have status set to "suggested". 
Returns: - Annotation | AnnotationList: suggested annotation(s) + suggested annotation(s) """ assert_type(annotations, "annotations", [Annotation, AnnotationWrite, Sequence]) # Deal with status fields in both cases: Single item and list of items @@ -141,11 +141,11 @@ async def update( """`Update annotations `_ Args: - item (Annotation | AnnotationWrite | AnnotationUpdate | Sequence[Annotation | AnnotationWrite | AnnotationUpdate]): Annotation or list of annotations to update (or patch or list of patches to apply) - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Annotation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Annotation or list of annotations to update (or patch or list of patches to apply) + mode: How to update data when a non-update object is given (Annotation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. 
Returns: - Annotation | AnnotationList: No description.""" + No description.""" return await self._update_multiple( list_cls=AnnotationList, resource_cls=Annotation, update_cls=AnnotationUpdate, items=item, mode=mode ) @@ -154,7 +154,7 @@ async def delete(self, id: int | Sequence[int]) -> None: """`Delete annotations `_ Args: - id (int | Sequence[int]): ID or list of IDs to be deleted + id: ID or list of IDs to be deleted """ await self._delete_multiple(identifiers=IdentifierSequence.load(ids=id), wrap_ids=True) @@ -162,10 +162,10 @@ async def retrieve_multiple(self, ids: Sequence[int]) -> AnnotationList: """`Retrieve annotations by IDs `_` Args: - ids (Sequence[int]): list of IDs to be retrieved + ids: list of IDs to be retrieved Returns: - AnnotationList: list of annotations + list of annotations """ identifiers = IdentifierSequence.load(ids=ids, external_ids=None) return await self._retrieve_multiple(list_cls=AnnotationList, resource_cls=Annotation, identifiers=identifiers) @@ -174,10 +174,10 @@ async def retrieve(self, id: int) -> Annotation | None: """`Retrieve an annotation by id `_ Args: - id (int): id of the annotation to be retrieved + id: id of the annotation to be retrieved Returns: - Annotation | None: annotation requested + annotation requested """ identifiers = IdentifierSequence.load(ids=id, external_ids=None).as_singleton() return await self._retrieve_multiple(list_cls=AnnotationList, resource_cls=Annotation, identifiers=identifiers) @@ -188,11 +188,11 @@ async def reverse_lookup( """Reverse lookup annotated resources based on having annotations matching the filter. Args: - filter (AnnotationReverseLookupFilter): Filter to apply - limit (int | None): Maximum number of results to return. Defaults to None (all). + filter: Filter to apply + limit: Maximum number of results to return. Defaults to None (all). 
Returns: - ResourceReferenceList: List of resource references + List of resource references Examples: @@ -225,11 +225,11 @@ async def list(self, filter: AnnotationFilter | dict, limit: int | None = DEFAUL Passing a filter with both 'annotated_resource_type' and 'annotated_resource_ids' is always required. Args: - filter (AnnotationFilter | dict): Return annotations with parameter values that match what is specified. - limit (int | None): Maximum number of annotations to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + filter: Return annotations with parameter values that match what is specified. + limit: Maximum number of annotations to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - AnnotationList: list of annotations + list of annotations Example: diff --git a/cognite/client/_api/assets.py b/cognite/client/_api/assets.py index 09937332e2..ceb515e4ea 100644 --- a/cognite/client/_api/assets.py +++ b/cognite/client/_api/assets.py @@ -149,29 +149,29 @@ async def __call__( Fetches assets as they are iterated over, so you keep a limited number of assets in memory. Args: - chunk_size (int | None): Number of assets to return in each chunk. Defaults to yielding one asset a time. - name (str | None): Name of asset. Often referred to as tag. - parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets. - parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets. - asset_subtree_ids (int | Sequence[int] | None): Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. 
- metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value - data_set_ids (int | Sequence[int] | None): Return only assets in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only assets in the specified data set(s) with this external id / these external ids. - labels (LabelFilter | None): Return only the assets matching the specified label. - geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. - source (str | None): The source of this asset - created_time (TimestampRange | dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (TimestampRange | dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - root (bool | None): filtered assets are root assets or not - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - aggregated_properties (Sequence[AggregateAssetProperty] | None): Set of aggregated properties to include. Options are childCount, path, depth. - limit (int | None): Maximum number of assets to return. Defaults to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + chunk_size: Number of assets to return in each chunk. Defaults to yielding one asset a time. + name: Name of asset. Often referred to as tag. 
+ parent_ids: Return only the direct descendants of the specified assets. + parent_external_ids: Return only the direct descendants of the specified assets. + asset_subtree_ids: Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + metadata: Custom, application specific metadata. String key -> String value + data_set_ids: Return only assets in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only assets in the specified data set(s) with this external id / these external ids. + labels: Return only the assets matching the specified label. + geo_location: Only include files matching the specified geographic relation. + source: The source of this asset + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + root: filtered assets are root assets or not + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + aggregated_properties: Set of aggregated properties to include. Options are childCount, path, depth. + limit: Maximum number of assets to return. Defaults to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. 
Yields: - Asset | AssetList: yields Asset one by one if chunk_size is not specified, else AssetList objects. + yields Asset one by one if chunk_size is not specified, else AssetList objects. """ # noqa: DOC404 agg_props = self._process_aggregated_props(aggregated_properties) asset_subtree_ids_processed = process_asset_subtree_ids(asset_subtree_ids, asset_subtree_external_ids) @@ -213,11 +213,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None) """`Retrieve a single asset by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - Asset | None: Requested asset or None if it does not exist. + Requested asset or None if it does not exist. Examples: @@ -244,12 +244,12 @@ async def retrieve_multiple( """`Retrieve multiple assets by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - AssetList: The requested assets. + The requested assets. Examples: @@ -278,12 +278,12 @@ async def aggregate_count( """`Count of assets matching the specified filters. `_ Args: - property (AssetPropertyLike | None): If specified, get an approximate number of asset with a specific property (property is not null) and matching the filters. - advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down the assets to count. - filter (AssetFilter | dict[str, Any] | None): The filter to narrow down the assets to count (strict matching). + property: If specified, get an approximate number of asset with a specific property (property is not null) and matching the filters. + advanced_filter: The advanced filter to narrow down the assets to count. 
+ filter: The filter to narrow down the assets to count (strict matching). Returns: - int: The number of assets matching the specified filters. + The number of assets matching the specified filters. Examples: @@ -320,12 +320,12 @@ async def aggregate_cardinality_values( """`Find approximate property count for assets. `_ Args: - property (AssetPropertyLike): The property to count the cardinality of. - advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + property: The property to count the cardinality of. + advanced_filter: The advanced filter to narrow down assets. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down assets (strict matching). Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -365,13 +365,12 @@ async def aggregate_cardinality_properties( """`Find approximate paths count for assets. `_ Args: - path (AssetPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. - It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The advanced filter to narrow down assets. 
+ aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down assets (strict matching). Returns: - int: The number of properties matching the specified filters. + The number of properties matching the specified filters. Examples: @@ -405,13 +404,13 @@ async def aggregate_unique_values( In the case of text fields, the values are aggregated in a case-insensitive manner. Args: - property (AssetPropertyLike): The property to group by. - advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + property: The property to group by. + advanced_filter: The advanced filter to narrow down assets. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down assets (strict matching). Returns: - UniqueResultList: List of unique values of assets matching the specified filters and search. + List of unique values of assets matching the specified filters and search. Examples: @@ -468,14 +467,13 @@ async def aggregate_unique_properties( In the case of text fields, the values are aggregated in a case-insensitive manner. Args: - path (AssetPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. - It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. 
It means to aggregate only metadata properties (aka keys). + advanced_filter: The advanced filter to narrow down assets. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down assets (strict matching). Returns: - UniqueResultList: List of unique values of assets matching the specified filters and search. + List of unique values of assets matching the specified filters and search. Examples: @@ -509,10 +507,10 @@ async def create(self, asset: Asset | AssetWrite | Sequence[Asset] | Sequence[As When specifying parent-child relation between assets using `parentExternalId` the link will be resvoled into an internal ID and stored as `parentId`. Args: - asset (Asset | AssetWrite | Sequence[Asset] | Sequence[AssetWrite]): Asset or list of assets to create. + asset: Asset or list of assets to create. Returns: - Asset | AssetList: Created asset(s) + Created asset(s) Examples: @@ -552,12 +550,12 @@ async def create_hierarchy( assets, so you may pass zero, one or many (same goes for the non-root assets). Args: - assets (Sequence[AssetWrite] | AssetHierarchy): List of assets to create or an instance of AssetHierarchy. - upsert (bool): If used, already existing assets will be updated instead of an exception being raised. You may control how updates are applied with the 'upsert_mode' argument. - upsert_mode (Literal['patch', 'replace']): Only applicable with upsert. Pass 'patch' to only update fields with non-null values (default), or 'replace' to do full updates (unset fields become null or empty). + assets: List of assets to create or an instance of AssetHierarchy. + upsert: If used, already existing assets will be updated instead of an exception being raised. You may control how updates are applied with the 'upsert_mode' argument. + upsert_mode: Only applicable with upsert. Pass 'patch' to only update fields with non-null values (default), or 'replace' to do full updates (unset fields become null or empty). 
Returns: - AssetList: Created (and possibly updated) asset hierarchy + Created (and possibly updated) asset hierarchy Prior to insertion, this function will run validation on the given assets and raise an error if any of the following issues are found: @@ -680,10 +678,10 @@ async def delete( """`Delete one or more assets `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids - recursive (bool): Recursively delete whole asset subtrees under given ids. Defaults to False. - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: External ID or list of external ids + recursive: Recursively delete whole asset subtrees under given ids. Defaults to False. + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -723,10 +721,10 @@ async def update( Labels can be added, removed or replaced (set). Note that set operation deletes all the existing labels and adds the new specified labels. Args: - item (Asset | AssetWrite | AssetUpdate | Sequence[Asset | AssetWrite | AssetUpdate]): Asset(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Asset or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Asset(s) to update + mode: How to update data when a non-update object is given (Asset or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). 
Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Asset | AssetList: Updated asset(s) + Updated asset(s) Examples: Perform a partial update on an asset, updating the description and adding a new field to metadata: @@ -791,11 +789,11 @@ async def upsert( For more details, see :ref:`appendix-upsert`. Args: - item (Asset | AssetWrite | Sequence[Asset | AssetWrite]): Asset or list of assets to upsert. - mode (Literal['patch', 'replace']): Whether to patch or replace in the case the assets are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + item: Asset or list of assets to upsert. + mode: Whether to patch or replace in the case the assets are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. Returns: - Asset | AssetList: The upserted asset(s). + The upserted asset(s). Examples: @@ -834,14 +832,14 @@ async def search( Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. Args: - name (str | None): Fuzzy match on name. - description (str | None): Fuzzy match on description. - query (str | None): Whitespace-separated terms to search for in assets. Does a best-effort fuzzy search in relevant fields (currently name and description) for variations of any of the search terms, and orders results by relevance. - filter (AssetFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. - limit (int): Maximum number of results to return. 
+ name: Fuzzy match on name. + description: Fuzzy match on description. + query: Whitespace-separated terms to search for in assets. Does a best-effort fuzzy search in relevant fields (currently name and description) for variations of any of the search terms, and orders results by relevance. + filter: Filter to apply. Performs exact match on these fields. + limit: Maximum number of results to return. Returns: - AssetList: List of requested assets + List of requested assets Examples: @@ -882,12 +880,12 @@ async def retrieve_subtree( """Retrieve the subtree for this asset up to a specified depth. Args: - id (int | None): Id of the root asset in the subtree. - external_id (str | None): External id of the root asset in the subtree. - depth (int | None): Retrieve assets up to this depth below the root asset in the subtree. Omit to get the entire subtree. + id: Id of the root asset in the subtree. + external_id: External id of the root asset in the subtree. + depth: Retrieve assets up to this depth below the root asset in the subtree. Omit to get the entire subtree. Returns: - AssetList: The requested assets or empty AssetList if asset does not exist. + The requested assets or empty AssetList if asset does not exist. """ asset = await self.retrieve(id=id, external_id=external_id) if asset is None: @@ -941,29 +939,29 @@ async def list( """`List assets `_ Args: - name (str | None): Name of asset. Often referred to as tag. - parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets. - parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets. - asset_subtree_ids (int | Sequence[int] | None): Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. 
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only assets in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only assets in the specified data set(s) with this external id / these external ids. - labels (LabelFilter | None): Return only the assets matching the specified label filter. - geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. - source (str | None): The source of this asset. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - root (bool | None): filtered assets are root assets or not. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - aggregated_properties (Sequence[AggregateAssetProperty] | None): Set of aggregated properties to include. Options are childCount, path, depth. - partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). - limit (int | None): Maximum number of assets to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). 
It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + name: Name of asset. Often referred to as tag. + parent_ids: Return only the direct descendants of the specified assets. + parent_external_ids: Return only the direct descendants of the specified assets. + asset_subtree_ids: Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only assets in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only assets in the specified data set(s) with this external id / these external ids. + labels: Return only the assets matching the specified label filter. + geo_location: Only include files matching the specified geographic relation. + metadata: Custom, application specific metadata. String key -> String value. + source: The source of this asset. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + root: filtered assets are root assets or not. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + aggregated_properties: Set of aggregated properties to include. Options are childCount, path, depth. 
+ partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + limit: Maximum number of assets to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. Returns: - AssetList: List of requested assets + List of requested assets .. note:: When using `partitions`, there are few considerations to keep in mind: diff --git a/cognite/client/_api/data_modeling/containers.py b/cognite/client/_api/data_modeling/containers.py index a5329ef52a..4a2a5c76b8 100644 --- a/cognite/client/_api/data_modeling/containers.py +++ b/cognite/client/_api/data_modeling/containers.py @@ -72,13 +72,13 @@ async def __call__( Fetches containers as they are iterated over, so you keep a limited number of containers in memory. Args: - chunk_size (int | None): Number of containers to return in each chunk. Defaults to yielding one container a time. - space (str | None): The space to query. - include_global (bool): Whether the global containers should be returned. - limit (int | None): Maximum number of containers to return. Defaults to returning all items. + chunk_size: Number of containers to return in each chunk. Defaults to yielding one container a time. + space: The space to query. + include_global: Whether the global containers should be returned. + limit: Maximum number of containers to return. Defaults to returning all items. Yields: - Container | ContainerList: yields Container one by one if chunk_size is not specified, else ContainerList objects. 
+ yields Container one by one if chunk_size is not specified, else ContainerList objects. """ # noqa: DOC404 flt = _ContainerFilter(space, include_global) async for item in self._list_generator( @@ -104,10 +104,10 @@ async def retrieve( """`Retrieve one or more container by id(s). `_ Args: - ids (ContainerIdentifier | Sequence[ContainerIdentifier]): Identifier for container(s). + ids: Identifier for container(s). Returns: - Container | ContainerList | None: Requested container or None if it does not exist. + Requested container or None if it does not exist. Examples: @@ -134,9 +134,9 @@ async def delete(self, ids: ContainerIdentifier | Sequence[ContainerIdentifier]) """`Delete one or more containers `_ Args: - ids (ContainerIdentifier | Sequence[ContainerIdentifier]): The container identifier(s). + ids: The container identifier(s). Returns: - list[ContainerId]: The container(s) which has been deleted. Empty list if nothing was deleted. + The container(s) which has been deleted. Empty list if nothing was deleted. Examples: Delete containers by id: @@ -161,9 +161,9 @@ async def delete_constraints(self, ids: Sequence[ConstraintIdentifier]) -> list[ """`Delete one or more constraints `_ Args: - ids (Sequence[ConstraintIdentifier]): The constraint identifier(s). + ids: The constraint identifier(s). Returns: - list[ConstraintIdentifier]: The constraints(s) which have been deleted. + The constraint(s) which have been deleted. Examples: Delete constraints by id: @@ -181,9 +181,9 @@ async def delete_indexes(self, ids: Sequence[IndexIdentifier]) -> list[IndexIden """`Delete one or more indexes `_ Args: - ids (Sequence[IndexIdentifier]): The index identifier(s). + ids: The index identifier(s). Returns: - list[IndexIdentifier]: The indexes(s) which has been deleted. + The index(es) which have been deleted. 
Examples: Delete indexes by id: @@ -229,12 +229,12 @@ async def list( """`List containers `_ Args: - space (str | None): The space to query - limit (int | None): Maximum number of containers to return. Defaults to 10. Set to -1, float("inf") or None to return all items. - include_global (bool): Whether the global containers should be returned. + space: The space to query + limit: Maximum number of containers to return. Defaults to 10. Set to -1, float("inf") or None to return all items. + include_global: Whether the global containers should be returned. Returns: - ContainerList: List of requested containers + List of requested containers Examples: @@ -275,10 +275,10 @@ async def apply(self, container: ContainerApply | Sequence[ContainerApply]) -> C """`Add or update (upsert) containers. `_ Args: - container (ContainerApply | Sequence[ContainerApply]): Container(s) to create or update. + container: Container(s) to create or update. Returns: - Container | ContainerList: Created container(s) + Created container(s) Examples: diff --git a/cognite/client/_api/data_modeling/data_models.py b/cognite/client/_api/data_modeling/data_models.py index 28d63b7105..0f715759be 100644 --- a/cognite/client/_api/data_modeling/data_models.py +++ b/cognite/client/_api/data_modeling/data_models.py @@ -73,15 +73,15 @@ async def __call__( Fetches data model as they are iterated over, so you keep a limited number of data model in memory. Args: - chunk_size (int | None): Number of data model to return in each chunk. Defaults to yielding one data_model a time. - limit (int | None): Maximum number of data model to return. Defaults to returning all items. - space (str | None): The space to query. - inline_views (bool): Whether to expand the referenced views inline in the returned result. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. 
- include_global (bool): Whether to include global views. + chunk_size: Number of data model to return in each chunk. Defaults to yielding one data_model a time. + limit: Maximum number of data model to return. Defaults to returning all items. + space: The space to query. + inline_views: Whether to expand the referenced views inline in the returned result. + all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global: Whether to include global views. Yields: - DataModel | DataModelList: yields DataModel one by one if chunk_size is not specified, else DataModelList objects. + yields DataModel one by one if chunk_size is not specified, else DataModelList objects. """ # noqa: DOC404 filter = DataModelFilter(space, inline_views, all_versions, include_global) @@ -112,11 +112,11 @@ async def retrieve( """`Retrieve data_model(s) by id(s). `_ Args: - ids (DataModelIdentifier | Sequence[DataModelIdentifier]): Data Model identifier(s). - inline_views (bool): Whether to expand the referenced views inline in the returned result. + ids: Data Model identifier(s). + inline_views: Whether to expand the referenced views inline in the returned result. Returns: - DataModelList[ViewId] | DataModelList[View]: Requested data model(s) or empty if none exist. + Requested data model(s) or empty if none exist. Examples: @@ -138,9 +138,9 @@ async def delete(self, ids: DataModelIdentifier | Sequence[DataModelIdentifier]) """`Delete one or more data model `_ Args: - ids (DataModelIdentifier | Sequence[DataModelIdentifier]): Data Model identifier(s). + ids: Data Model identifier(s). Returns: - list[DataModelId]: The data_model(s) which has been deleted. None if nothing was deleted. + The data_model(s) which has been deleted. None if nothing was deleted. 
Examples: Delete data model by id: @@ -192,14 +192,14 @@ async def list( """`List data models `_ Args: - inline_views (bool): Whether to expand the referenced views inline in the returned result. - limit (int | None): Maximum number of data model to return. Defaults to 10. Set to -1, float("inf") or None to return all items. - space (str | None): The space to query. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. - include_global (bool): Whether to include global data models. + inline_views: Whether to expand the referenced views inline in the returned result. + limit: Maximum number of data model to return. Defaults to 10. Set to -1, float("inf") or None to return all items. + space: The space to query. + all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global: Whether to include global data models. Returns: - DataModelList[View] | DataModelList[ViewId]: List of requested data models + List of requested data models Examples: @@ -241,10 +241,10 @@ async def apply(self, data_model: DataModelApply | Sequence[DataModelApply]) -> """`Create or update one or more data model. `_ Args: - data_model (DataModelApply | Sequence[DataModelApply]): Data model(s) to create or update (upsert). + data_model: Data model(s) to create or update (upsert). Returns: - DataModel | DataModelList: Created data model(s) + Created data model(s) Examples: diff --git a/cognite/client/_api/data_modeling/graphql.py b/cognite/client/_api/data_modeling/graphql.py index 58d0943c36..9c5e32d1c2 100644 --- a/cognite/client/_api/data_modeling/graphql.py +++ b/cognite/client/_api/data_modeling/graphql.py @@ -39,10 +39,10 @@ async def _unsafely_wipe_and_regenerate_dml(self, id: DataModelIdentifier) -> st This removes all comments from the DML. 
Args: - id (DataModelIdentifier): The data model to apply DML to. + id: The data model to apply DML to. Returns: - str: The new DML + The new DML """ graphql_body = """ query WipeAndRegenerateDml($space: String!, $externalId: String!, $version: String!) { @@ -82,14 +82,14 @@ async def apply_dml( """Apply the DML for a given data model. Args: - id (DataModelIdentifier): The data model to apply DML to. - dml (str): The DML to apply. - name (str | None): The name of the data model. - description (str | None): The description of the data model. - previous_version (str | None): The previous version of the data model. Specify to reuse view versions from previous data model version. + id: The data model to apply DML to. + dml: The DML to apply. + name: The name of the data model. + description: The description of the data model. + previous_version: The previous version of the data model. Specify to reuse view versions from previous data model version. Returns: - DMLApplyResult: The id of the updated data model. + The id of the updated data model. Examples: @@ -163,12 +163,12 @@ async def query( """Execute a GraphQl query against a given data model. Args: - id (DataModelIdentifier): The data model to query. - query (str): The query to issue. - variables (dict[str, Any] | None): An optional dict of variables to pass to the query. + id: The data model to query. + query: The query to issue. + variables: An optional dict of variables to pass to the query. Returns: - dict[str, Any]: The query result + The query result Examples: diff --git a/cognite/client/_api/data_modeling/instances.py b/cognite/client/_api/data_modeling/instances.py index af949c4665..c6b652cfd5 100644 --- a/cognite/client/_api/data_modeling/instances.py +++ b/cognite/client/_api/data_modeling/instances.py @@ -257,18 +257,18 @@ async def __call__( Fetches instances as they are iterated over, so you keep a limited number of instances in memory. 
Args: - chunk_size (int | None): Number of data_models to return in each chunk. Defaults to yielding one instance at a time. - instance_type (Literal['node', 'edge']): Whether to query for nodes or edges. - limit (int | None): Maximum number of instances to return. Defaults to returning all items. - include_typing (bool): Whether to return property type information as part of the result. - sources (Source | Sequence[Source] | None): Views to retrieve properties from. - space (str | SequenceNotStr[str] | None): Only return instances in the given space (or list of spaces). - sort (list[InstanceSort | dict] | InstanceSort | dict | None): Sort(s) to apply to the returned instances. For nontrivial amounts of data, you need to have a backing, cursorable index. - filter (Filter | dict[str, Any] | None): Advanced filtering of instances. - debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + chunk_size: Number of data_models to return in each chunk. Defaults to yielding one instance at a time. + instance_type: Whether to query for nodes or edges. + limit: Maximum number of instances to return. Defaults to returning all items. + include_typing: Whether to return property type information as part of the result. + sources: Views to retrieve properties from. + space: Only return instances in the given space (or list of spaces). + sort: Sort(s) to apply to the returned instances. For nontrivial amounts of data, you need to have a backing, cursorable index. + filter: Advanced filtering of instances. + debug: Debug settings for profiling and troubleshooting. Yields: - Edge | EdgeList | Node | NodeList: yields Instance one by one if chunk_size is not specified, else NodeList/EdgeList objects. + yields Instance one by one if chunk_size is not specified, else NodeList/EdgeList objects. 
""" self._validate_filter(filter) filter = self._merge_space_into_filter(instance_type, space, filter) @@ -369,13 +369,13 @@ async def retrieve_edges( Args: - edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]]): Edge id(s) to retrieve. - edge_cls (type[T_Edge]): The custom edge class to use, the retrieved edges will automatically be serialized into this class. - sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom edge class. - include_typing (bool): Whether to include typing information + edges: Edge id(s) to retrieve. + edge_cls: The custom edge class to use, the retrieved edges will automatically be serialized into this class. + sources: Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom edge class. + include_typing: Whether to include typing information Returns: - EdgeList[T_Edge] | T_Edge | Edge | None: The requested edges. + The requested edges. Examples: @@ -473,13 +473,13 @@ async def retrieve_nodes( built-in Node class. Args: - nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]]): Node id(s) to retrieve. - node_cls (type[T_Node]): The custom node class to use, the retrieved nodes will automatically be serialized to this class. - sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom node class. - include_typing (bool): Whether to include typing information + nodes: Node id(s) to retrieve. + node_cls: The custom node class to use, the retrieved nodes will automatically be serialized to this class. + sources: Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom node class. 
+ include_typing: Whether to include typing information Returns: - NodeList[T_Node] | T_Node | Node | None: The requested edges. + The requested nodes. Examples: @@ -544,13 +544,13 @@ async def retrieve( """`Retrieve one or more instance by id(s). `_ Args: - nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node ids - edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge ids - sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. - include_typing (bool): Whether to return property type information as part of the result. + nodes: Node ids + edges: Edge ids + sources: Retrieve properties from the listed - by reference - views. + include_typing: Whether to return property type information as part of the result. Returns: - InstancesResult[Node, Edge]: Requested instances. + Requested instances. Examples: @@ -692,11 +692,11 @@ async def delete( """`Delete one or more instances `_ Args: - nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node ids - edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge ids + nodes: Node ids + edges: Edge ids Returns: - InstancesDeleteResult: The instance ID(s) that was deleted. Empty list if nothing was deleted. + The instance ID(s) that was deleted. Empty list if nothing was deleted. Examples: @@ -745,13 +745,13 @@ async def inspect( This method will return the involved views and containers for the given nodes and edges. Args: - nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node IDs. - edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge IDs. - involved_views (InvolvedViews | None): Whether to include involved views. Must pass at least one of involved_views or involved_containers. 
- involved_containers (InvolvedContainers | None): Whether to include involved containers. Must pass at least one of involved_views or involved_containers. + nodes: Node IDs. + edges: Edge IDs. + involved_views: Whether to include involved views. Must pass at least one of involved_views or involved_containers. + involved_containers: Whether to include involved containers. Must pass at least one of involved_views or involved_containers. Returns: - InstanceInspectResults: List of instance inspection results. + List of instance inspection results. Examples: @@ -817,13 +817,13 @@ async def subscribe( see :ref:`this example of syncing instances to a local SQLite database `. Args: - query (QuerySync): The query to subscribe to. - callback (Callable[[QueryResult], None | Awaitable[None]]): The callback function to call when the result set changes. Can be a regular or async function. - poll_delay_seconds (float): The time to wait between polls when no data is present. Defaults to 30 seconds. - throttle_seconds (float): The time to wait between polls despite data being present. + query: The query to subscribe to. + callback: The callback function to call when the result set changes. Can be a regular or async function. + poll_delay_seconds: The time to wait between polls when no data is present. Defaults to 30 seconds. + throttle_seconds: The time to wait between polls despite data being present. Returns: - SubscriptionContext: An object that can be used to inspect and cancel the subscription. + An object that can be used to inspect and cancel the subscription. Examples: @@ -955,16 +955,16 @@ async def apply( """`Add or update (upsert) instances. `_ Args: - nodes (NodeApply | Sequence[NodeApply] | None): Nodes to apply - edges (EdgeApply | Sequence[EdgeApply] | None): Edges to apply - auto_create_start_nodes (bool): Whether to create missing start nodes for edges when ingesting. By default, the start node of an edge must exist before it can be ingested. 
- auto_create_end_nodes (bool): Whether to create missing end nodes for edges when ingesting. By default, the end node of an edge must exist before it can be ingested. - auto_create_direct_relations (bool): Whether to create missing direct relation targets when ingesting. - skip_on_version_conflict (bool): If existingVersion is specified on any of the nodes/edges in the input, the default behaviour is that the entire ingestion will fail when version conflicts occur. If skipOnVersionConflict is set to true, items with version conflicts will be skipped instead. If no version is specified for nodes/edges, it will do the writing directly. - replace (bool): How do we behave when a property value exists? Do we replace all matching and existing values with the supplied values (true)? Or should we merge in new values for properties together with the existing values (false)? Note: This setting applies for all nodes or edges specified in the ingestion call. + nodes: Nodes to apply + edges: Edges to apply + auto_create_start_nodes: Whether to create missing start nodes for edges when ingesting. By default, the start node of an edge must exist before it can be ingested. + auto_create_end_nodes: Whether to create missing end nodes for edges when ingesting. By default, the end node of an edge must exist before it can be ingested. + auto_create_direct_relations: Whether to create missing direct relation targets when ingesting. + skip_on_version_conflict: If existingVersion is specified on any of the nodes/edges in the input, the default behaviour is that the entire ingestion will fail when version conflicts occur. If skipOnVersionConflict is set to true, items with version conflicts will be skipped instead. If no version is specified for nodes/edges, it will do the writing directly. + replace: How do we behave when a property value exists? Do we replace all matching and existing values with the supplied values (true)? 
Or should we merge in new values for properties together with the existing values (false)? Note: This setting applies for all nodes or edges specified in the ingestion call. Returns: - InstancesApplyResult: Created instance(s) + Created instance(s) Examples: @@ -1164,24 +1164,20 @@ async def search( """`Search instances `_ Args: - view (ViewId): View to search in. - query (str | None): Query string that will be parsed and used for search. - instance_type (Literal['node', 'edge'] | type[T_Node] | type[T_Edge]): Whether to search for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example. - properties (list[str] | None): Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view. - target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. - space (str | SequenceNotStr[str] | None): Restrict instance search to the given space (or list of spaces). - filter (Filter | dict[str, Any] | None): Advanced filtering of instances. - include_typing (bool): Whether to include typing information. - limit (int | None): Maximum number of instances to return. Defaults to 25. Will return the maximum number - of results (1000) if set to None, -1, or math.inf. - sort (Sequence[InstanceSort | dict] | InstanceSort | dict | None): How you want the listed instances information ordered. - operator (Literal['AND', 'OR']): Controls how multiple search terms are combined when matching documents. - AND (default): A document matches only if it contains all of the query terms across the searchable fields. - This typically returns fewer results but with higher relevance. 
OR: A document matches if it contains any - of the query terms in the searchable fields. This typically returns more results but with lower precision. + view: View to search in. + query: Query string that will be parsed and used for search. + instance_type: Whether to search for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example. + properties: Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view. + target_units: Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. + space: Restrict instance search to the given space (or list of spaces). + filter: Advanced filtering of instances. + include_typing: Whether to include typing information. + limit: Maximum number of instances to return. Defaults to 25. Will return the maximum number of results (1000) if set to None, -1, or math.inf. + sort: How you want the listed instances information ordered. + operator: Controls how multiple search terms are combined when matching documents. AND (default): A document matches only if it contains all of the query terms across the searchable fields. This typically returns fewer results but with higher relevance. OR: A document matches if it contains any of the query terms in the searchable fields. This typically returns more results but with lower precision. Returns: - NodeList[T_Node] | EdgeList[T_Edge]: Search result with matching nodes or edges. + Search result with matching nodes or edges. Examples: @@ -1330,20 +1326,19 @@ async def aggregate( """`Aggregate data across nodes/edges `_ Args: - view (ViewId): View to aggregate over. - aggregates (MetricAggregation | dict | Sequence[MetricAggregation | dict]): The properties to aggregate over. 
- group_by (str | SequenceNotStr[str] | None): The selection of fields to group the results by when doing aggregations. You can specify up to 5 items to group by. - instance_type (Literal['node', 'edge']): The type of instance. - query (str | None): Optional query string. The API will parse the query string, and use it to match the text properties on elements to use for the aggregate(s). - properties (str | SequenceNotStr[str] | None): Optional list of properties you want to apply the query to. If you do not list any properties, you search through text fields by default. - target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. - space (str | SequenceNotStr[str] | None): Restrict instance aggregate query to the given space (or list of spaces). - filter (Filter | dict[str, Any] | None): Advanced filtering of instances. - limit (int | None): Maximum number of instances to return. Defaults to 25. Will return the maximum number - of results (1000) if set to None, -1, or math.inf. + view: View to aggregate over. + aggregates: The properties to aggregate over. + group_by: The selection of fields to group the results by when doing aggregations. You can specify up to 5 items to group by. + instance_type: The type of instance. + query: Optional query string. The API will parse the query string, and use it to match the text properties on elements to use for the aggregate(s). + properties: Optional list of properties you want to apply the query to. If you do not list any properties, you search through text fields by default. + target_units: Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. + space: Restrict instance aggregate query to the given space (or list of spaces). 
+ filter: Advanced filtering of instances. + limit: Maximum number of instances to return. Defaults to 25. Will return the maximum number of results (1000) if set to None, -1, or math.inf. Returns: - AggregatedNumberedValue | list[AggregatedNumberedValue] | InstanceAggregationResultList: Node or edge aggregation results. + Node or edge aggregation results. Examples: @@ -1441,18 +1436,18 @@ async def histogram( """`Produces histograms for nodes/edges `_ Args: - view (ViewId): View to to aggregate over. - histograms (Histogram | Sequence[Histogram]): The properties to aggregate over. - instance_type (Literal['node', 'edge']): Whether to search for nodes or edges. - query (str | None): Query string that will be parsed and used for search. - properties (SequenceNotStr[str] | None): Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view. - target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. - space (str | SequenceNotStr[str] | None): Restrict histogram query to instances in the given space (or list of spaces). - filter (Filter | dict[str, Any] | None): Advanced filtering of instances. - limit (int): Maximum number of instances to return. Defaults to 25. + view: View to aggregate over. + histograms: The properties to aggregate over. + instance_type: Whether to search for nodes or edges. + query: Query string that will be parsed and used for search. + properties: Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view. + target_units: Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. 
+ space: Restrict histogram query to instances in the given space (or list of spaces). + filter: Advanced filtering of instances. + limit: Maximum number of instances to return. Defaults to 25. Returns: - HistogramValue | list[HistogramValue]: Node or edge aggregation results. + Node or edge aggregation results. Examples: @@ -1513,12 +1508,12 @@ async def query( recursive edge traversal, chaining of result sets, and granular property selection. Args: - query (Query): Query. - include_typing (bool): Should we return property type information as part of the result? - debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + query: Query. + include_typing: Should we return property type information as part of the result? + debug: Debug settings for profiling and troubleshooting. Returns: - QueryResult: The resulting nodes and/or edges from the query. + The resulting nodes and/or edges from the query. Examples: @@ -1587,12 +1582,12 @@ async def sync( Subscribe to changes for nodes and edges in a project, matching a supplied filter. Args: - query (QuerySync): Query. - include_typing (bool): Should we return property type information as part of the result? - debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + query: Query. + include_typing: Should we return property type information as part of the result? + debug: Debug settings for profiling and troubleshooting. Returns: - QueryResult: The resulting nodes and/or edges from the query. + The resulting nodes and/or edges from the query. Examples: @@ -1734,17 +1729,17 @@ async def list( """`List instances `_ Args: - instance_type (Literal['node', 'edge'] | type[T_Node] | type[T_Edge]): Whether to query for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example. - include_typing (bool): Whether to return property type information as part of the result. 
- sources (Source | Sequence[Source] | None): Views to retrieve properties from. - space (str | SequenceNotStr[str] | None): Only return instances in the given space (or list of spaces). - limit (int | None): Maximum number of instances to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - sort (Sequence[InstanceSort | dict] | InstanceSort | dict | None): How you want the listed instances information ordered. - filter (Filter | dict[str, Any] | None): Advanced filtering of instances. - debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + instance_type: Whether to query for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example. + include_typing: Whether to return property type information as part of the result. + sources: Views to retrieve properties from. + space: Only return instances in the given space (or list of spaces). + limit: Maximum number of instances to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + sort: How you want the listed instances information ordered. + filter: Advanced filtering of instances. + debug: Debug settings for profiling and troubleshooting. Returns: - NodeList[T_Node] | EdgeList[T_Edge]: List of requested instances + List of requested instances Examples: diff --git a/cognite/client/_api/data_modeling/space_statistics.py b/cognite/client/_api/data_modeling/space_statistics.py index fd50a31b14..3d7ccccb3b 100644 --- a/cognite/client/_api/data_modeling/space_statistics.py +++ b/cognite/client/_api/data_modeling/space_statistics.py @@ -44,10 +44,10 @@ async def retrieve( """`Retrieve usage data and limits per space `_ Args: - space (str | SequenceNotStr[str]): The space or spaces to retrieve statistics for. + space: The space or spaces to retrieve statistics for. 
Returns: - SpaceStatistics | SpaceStatisticsList | None: The requested statistics and limits for the specified space(s). + The requested statistics and limits for the specified space(s). Examples: @@ -78,7 +78,7 @@ async def list(self) -> SpaceStatisticsList: Returns statistics for data modeling resources grouped by each space in the project. Returns: - SpaceStatisticsList: The requested statistics and limits for all spaces in the project. + The requested statistics and limits for all spaces in the project. Examples: diff --git a/cognite/client/_api/data_modeling/spaces.py b/cognite/client/_api/data_modeling/spaces.py index ebda5c59eb..f65e43b26a 100644 --- a/cognite/client/_api/data_modeling/spaces.py +++ b/cognite/client/_api/data_modeling/spaces.py @@ -46,11 +46,11 @@ async def __call__( Fetches spaces as they are iterated over, so you keep a limited number of spaces in memory. Args: - chunk_size (int | None): Number of spaces to return in each chunk. Defaults to yielding one space a time. - limit (int | None): Maximum number of spaces to return. Defaults to returning all items. + chunk_size: Number of spaces to return in each chunk. Defaults to yielding one space a time. + limit: Maximum number of spaces to return. Defaults to returning all items. Yields: - Space | SpaceList: yields Space one by one if chunk_size is not specified, else SpaceList objects. + yields Space one by one if chunk_size is not specified, else SpaceList objects. """ # noqa: DOC404 async for item in self._list_generator( list_cls=SpaceList, @@ -72,10 +72,10 @@ async def retrieve(self, spaces: str | SequenceNotStr[str]) -> Space | SpaceList """`Retrieve one or more spaces. `_ Args: - spaces (str | SequenceNotStr[str]): Space ID + spaces: Space ID Returns: - Space | SpaceList | None: Requested space or None if it does not exist. + Requested space or None if it does not exist. 
 Examples: @@ -101,9 +101,9 @@ async def delete(self, spaces: str | SequenceNotStr[str]) -> list[str]: """`Delete one or more spaces `_ Args: - spaces (str | SequenceNotStr[str]): ID or ID list ids of spaces. + spaces: ID or list of IDs of spaces. Returns: - list[str]: The space(s) which has been deleted. + The space(s) which has been deleted. Examples: Delete spaces by id: @@ -132,11 +132,11 @@ async def list( """`List spaces `_ Args: - limit (int | None): Maximum number of spaces to return. Defaults to 10. Set to -1, float("inf") or None to return all items. - include_global (bool): Whether to include global spaces. Defaults to False. + limit: Maximum number of spaces to return. Defaults to 10. Set to -1, float("inf") or None to return all items. + include_global: Whether to include global spaces. Defaults to False. Returns: - SpaceList: List of requested spaces + List of requested spaces Examples: @@ -176,10 +176,10 @@ async def apply(self, spaces: SpaceApply | Sequence[SpaceApply]) -> Space | Spac """`Create or patch one or more spaces. `_ Args: - spaces (SpaceApply | Sequence[SpaceApply]): Space | Sequence[Space]): Space or spaces of spacesda to create or update. + spaces: Space or list of spaces to create or update. 
Returns: - Space | SpaceList: Created space(s) + Created space(s) Examples: diff --git a/cognite/client/_api/data_modeling/statistics.py b/cognite/client/_api/data_modeling/statistics.py index ea5264841d..7407670c67 100644 --- a/cognite/client/_api/data_modeling/statistics.py +++ b/cognite/client/_api/data_modeling/statistics.py @@ -33,7 +33,7 @@ async def project(self) -> ProjectStatistics: Returns the usage data and limits for a project's data modelling usage, including data model schemas and graph instances Returns: - ProjectStatistics: The requested statistics and limits + The requested statistics and limits Examples: diff --git a/cognite/client/_api/data_modeling/views.py b/cognite/client/_api/data_modeling/views.py index c21b97fe2c..c99120a8a5 100644 --- a/cognite/client/_api/data_modeling/views.py +++ b/cognite/client/_api/data_modeling/views.py @@ -72,15 +72,15 @@ async def __call__( Fetches views as they are iterated over, so you keep a limited number of views in memory. Args: - chunk_size (int | None): Number of views to return in each chunk. Defaults to yielding one view at a time. - limit (int | None): Maximum number of views to return. Defaults to returning all items. - space (str | None): (str | None): The space to query. - include_inherited_properties (bool): Whether to include properties inherited from views this view implements. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. - include_global (bool): Whether to include global views. + chunk_size: Number of views to return in each chunk. Defaults to yielding one view at a time. + limit: Maximum number of views to return. Defaults to returning all items. + space: The space to query. + include_inherited_properties: Whether to include properties inherited from views this view implements. + all_versions: Whether to return all versions. 
If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global: Whether to include global views. Yields: - View | ViewList: yields View one by one if chunk_size is not specified, else ViewList objects. + yields View one by one if chunk_size is not specified, else ViewList objects. """ # noqa: DOC404 filter_ = ViewFilter(space, include_inherited_properties, all_versions, include_global) async for item in self._list_generator( @@ -109,15 +109,12 @@ async def retrieve( """`Retrieve a single view by id. `_ Args: - ids (ViewIdentifier | Sequence[ViewIdentifier]): The view identifier(s). This can be given as a tuple of - strings or a ViewId object. For example, ("my_space", "my_view"), ("my_space", "my_view", "my_version"), - or ViewId("my_space", "my_view", "my_version"). Note that version is optional, if not provided, all versions - will be returned. - include_inherited_properties (bool): Whether to include properties inherited from views this view implements. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned (based on created_time) + ids: The view identifier(s). This can be given as a tuple of strings or a ViewId object. For example, ("my_space", "my_view"), ("my_space", "my_view", "my_version"), or ViewId("my_space", "my_view", "my_version"). Note that version is optional, if not provided, all versions will be returned. + include_inherited_properties: Whether to include properties inherited from views this view implements. + all_versions: Whether to return all versions. If false, only the newest version is returned (based on created_time) Returns: - ViewList: Requested view or None if it does not exist. + Requested view or None if it does not exist. 
Examples: @@ -144,9 +141,9 @@ async def delete(self, ids: ViewIdentifier | Sequence[ViewIdentifier]) -> list[V """`Delete one or more views `_ Args: - ids (ViewIdentifier | Sequence[ViewIdentifier]): View identifier(s) + ids: View identifier(s) Returns: - list[ViewId]: The identifier for the view(s) which has been deleted. Empty list if nothing was deleted. + The identifier for the view(s) which has been deleted. Empty list if nothing was deleted. Examples: Delete views by id: @@ -178,14 +175,14 @@ async def list( """`List views `_ Args: - limit (int | None): Maximum number of views to return. Defaults to 10. Set to -1, float("inf") or None to return all items. - space (str | None): (str | None): The space to query. - include_inherited_properties (bool): Whether to include properties inherited from views this view implements. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. - include_global (bool): Whether to include global views. + limit: Maximum number of views to return. Defaults to 10. Set to -1, float("inf") or None to return all items. + space: The space to query. + include_inherited_properties: Whether to include properties inherited from views this view implements. + all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global: Whether to include global views. Returns: - ViewList: List of requested views + List of requested views Examples: @@ -227,10 +224,10 @@ async def apply(self, view: ViewApply | Sequence[ViewApply]) -> View | ViewList: """`Create or update (upsert) one or more views. `_ Args: - view (ViewApply | Sequence[ViewApply]): View(s) to create or update. + view: View(s) to create or update. 
Returns: - View | ViewList: Created view(s) + Created view(s) Examples: diff --git a/cognite/client/_api/data_sets.py b/cognite/client/_api/data_sets.py index d1208850e8..cbb422b5cd 100644 --- a/cognite/client/_api/data_sets.py +++ b/cognite/client/_api/data_sets.py @@ -67,16 +67,16 @@ async def __call__( Fetches data sets as they are iterated over, so you keep a limited number of data sets in memory. Args: - chunk_size (int | None): Number of data sets to return in each chunk. Defaults to yielding one data set a time. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - write_protected (bool | None): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets. - limit (int | None): Maximum number of data sets to return. Defaults to return all items. + chunk_size: Number of data sets to return in each chunk. Defaults to yielding one data set a time. + metadata: Custom, application-specific metadata. String key -> String value. + created_time: Range between two timestamps. + last_updated_time: Range between two timestamps. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + write_protected: Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets. + limit: Maximum number of data sets to return. Defaults to return all items. Yields: - DataSet | DataSetList: yields DataSet one by one if chunk is not specified, else DataSetList objects. + yields DataSet one by one if chunk is not specified, else DataSetList objects. 
""" # noqa: DOC404 filter = DataSetFilter( metadata=metadata, @@ -102,10 +102,10 @@ async def create( """`Create one or more data sets. `_ Args: - data_set (DataSet | DataSetWrite | Sequence[DataSet] | Sequence[DataSetWrite]): Union[DataSet, Sequence[DataSet]]: Data set or list of data sets to create. + data_set: Union[DataSet, Sequence[DataSet]]: Data set or list of data sets to create. Returns: - DataSet | DataSetList: Created data set(s) + Created data set(s) Examples: @@ -126,11 +126,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None) """`Retrieve a single data set by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - DataSet | None: Requested data set or None if it does not exist. + Requested data set or None if it does not exist. Examples: @@ -157,12 +157,12 @@ async def retrieve_multiple( """`Retrieve multiple data sets by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - DataSetList: The requested data sets. + The requested data sets. Examples: @@ -186,10 +186,10 @@ async def aggregate_count(self, filter: DataSetFilter | dict[str, Any] | None = """`Aggregate data sets `_ Args: - filter (DataSetFilter | dict[str, Any] | None): Filter on data set filter with exact match + filter: Filter on data set filter with exact match Returns: - int: Count of data sets matching the filter. + Count of data sets matching the filter. 
Examples: @@ -226,11 +226,11 @@ async def update( """`Update one or more data sets `_ Args: - item (DataSet | DataSetWrite | DataSetUpdate | Sequence[DataSet | DataSetWrite | DataSetUpdate]): Data set(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DataSet or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Data set(s) to update + mode: How to update data when a non-update object is given (DataSet or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - DataSet | DataSetList: Updated data set(s) + Updated data set(s) Examples: @@ -265,15 +265,15 @@ async def list( """`List data sets `_ Args: - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - write_protected (bool | None): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets. - limit (int | None): Maximum number of data sets to return. 
Defaults to 25. Set to -1, float("inf") or None to return all items. + metadata: Custom, application-specific metadata. String key -> String value. + created_time: Range between two timestamps. + last_updated_time: Range between two timestamps. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + write_protected: Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets. + limit: Maximum number of data sets to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - DataSetList: List of requested data sets + List of requested data sets Examples: diff --git a/cognite/client/_api/datapoints.py b/cognite/client/_api/datapoints.py index 5c25767080..b6aff2dd50 100644 --- a/cognite/client/_api/datapoints.py +++ b/cognite/client/_api/datapoints.py @@ -599,13 +599,13 @@ async def __call__( No empty chunk is ever returned. Args: - queries (DatapointsQuery | Sequence[DatapointsQuery]): Query, or queries, using id, external_id or instance_id for the time series to fetch data for, with individual settings specified. The options 'limit' and 'include_outside_points' are not supported when iterating. - chunk_size_datapoints (int): The number of datapoints per time series to yield per iteration. Must evenly divide 100k OR be an integer multiple of 100k. Default: 100_000. - chunk_size_time_series (int | None): The max number of time series to yield per iteration (varies as time series get exhausted, but is never empty). Default: None (all given queries are iterated at the same time). - return_arrays (bool): Whether to return the datapoints as numpy arrays. Default: True. + queries: Query, or queries, using id, external_id or instance_id for the time series to fetch data for, with individual settings specified. The options 'limit' and 'include_outside_points' are not supported when iterating. 
+ chunk_size_datapoints: The number of datapoints per time series to yield per iteration. Must evenly divide 100k OR be an integer multiple of 100k. Default: 100_000. + chunk_size_time_series: The max number of time series to yield per iteration (varies as time series get exhausted, but is never empty). Default: None (all given queries are iterated at the same time). + return_arrays: Whether to return the datapoints as numpy arrays. Default: True. Yields: - DatapointsArray | DatapointsArrayList | Datapoints | DatapointsList: If return_arrays=True, a ``DatapointsArray`` object containing the datapoints chunk, or a ``DatapointsArrayList`` if multiple time series were asked for. When False, a ``Datapoints`` object containing the datapoints chunk, or a ``DatapointsList`` if multiple time series were asked for. + If return_arrays=True, a ``DatapointsArray`` object containing the datapoints chunk, or a ``DatapointsArrayList`` if multiple time series were asked for. When False, a ``Datapoints`` object containing the datapoints chunk, or a ``DatapointsList`` if multiple time series were asked for. Examples: @@ -1005,25 +1005,25 @@ async def retrieve( `status codes. `_ Args: - id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, dict (with id) or (mixed) sequence of these. See examples below. - external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, dict (with external id) or (mixed) sequence of these. See examples below. - instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id or sequence of instance ids. - start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC. - end (int | str | datetime.datetime | None): Exclusive end. Default: "now" - aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. 
Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) - granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. - timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. - target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. - target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit. - limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit) - include_outside_points (bool): Whether to include outside points. 
Not allowed when fetching aggregates. Default: False - ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False - include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. - ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. - treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. + id: Id, dict (with id) or (mixed) sequence of these. See examples below. + external_id: External id, dict (with external id) or (mixed) sequence of these. See examples below. + instance_id: Instance id or sequence of instance ids. + start: Inclusive start. Default: 1970-01-01 UTC. + end: Exclusive end. Default: "now" + aggregates: Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) + granularity: The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. 
Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. + timezone: For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. + target_unit: The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system: The unit system of the datapoints returned. Cannot be used with target_unit. + limit: Maximum number of datapoints to return for each time series. Default: None (no limit) + include_outside_points: Whether to include outside points. Not allowed when fetching aggregates. Default: False + ignore_unknown_ids: Whether to ignore missing time series rather than raising an exception. Default: False + include_status: Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. + ignore_bad_datapoints: Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. 
Default: True. + treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. Returns: - Datapoints | DatapointsList | None: A ``Datapoints`` object containing the requested data, or a ``DatapointsList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. + A ``Datapoints`` object containing the requested data, or a ``DatapointsList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. Examples: @@ -1356,25 +1356,25 @@ async def retrieve_arrays( `status codes. `_ Args: - id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, dict (with id) or (mixed) sequence of these. See examples below. - external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, dict (with external id) or (mixed) sequence of these. See examples below. - instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id or sequence of instance ids. - start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC. - end (int | str | datetime.datetime | None): Exclusive end. Default: "now" - aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. 
Default: None (raw datapoints returned) - granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. - timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. - target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. - target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit. - limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit) - include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False - ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False - include_status (bool): Also return the status code, an integer, for each datapoint in the response. 
Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. - ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. - treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. + id: Id, dict (with id) or (mixed) sequence of these. See examples below. + external_id: External id, dict (with external id) or (mixed) sequence of these. See examples below. + instance_id: Instance id or sequence of instance ids. + start: Inclusive start. Default: 1970-01-01 UTC. + end: Exclusive end. Default: "now" + aggregates: Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) + granularity: The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. + timezone: For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. 
Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. + target_unit: The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system: The unit system of the datapoints returned. Cannot be used with target_unit. + limit: Maximum number of datapoints to return for each time series. Default: None (no limit) + include_outside_points: Whether to include outside points. Not allowed when fetching aggregates. Default: False + ignore_unknown_ids: Whether to ignore missing time series rather than raising an exception. Default: False + include_status: Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. + ignore_bad_datapoints: Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. + treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. 
Returns: - DatapointsArray | DatapointsArrayList | None: A ``DatapointsArray`` object containing the requested data, or a ``DatapointsArrayList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. + A ``DatapointsArray`` object containing the requested data, or a ``DatapointsArrayList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. Note: For many more usage examples, check out the :py:meth:`~DatapointsAPI.retrieve` method which accepts exactly the same arguments. @@ -1491,29 +1491,29 @@ async def retrieve_dataframe( For many more usage examples, check out the :py:meth:`~DatapointsAPI.retrieve` method which accepts exactly the same arguments. Args: - id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, DatapointsQuery or (mixed) sequence of these. See examples. - external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, DatapointsQuery or (mixed) sequence of these. See examples. - instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id, DatapointsQuery or (mixed) sequence of these. See examples. - start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC. - end (int | str | datetime.datetime | None): Exclusive end. Default: "now" - aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. 
Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) - granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. - timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, -day or -month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. - target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. - target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit. - limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit) - include_outside_points (bool): Whether to include outside points. 
Not allowed when fetching aggregates. Default: False - ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False - ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. - treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. - uniform_index (bool): If only querying aggregates AND a single granularity is used (that's NOT a calendar granularity like month/quarter/year) AND no limit is used AND no timezone is used, specifying `uniform_index=True` will return a dataframe with an equidistant datetime index from the earliest `start` to the latest `end` (missing values will be NaNs). If these requirements are not met, a ValueError is raised. Default: False - include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. Also adds the status info as a separate level in the columns (MultiIndex). - include_unit (bool): Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) - include_aggregate_name (bool): Include aggregate in the dataframe columns, if present (separate MultiIndex level) - include_granularity_name (bool): Include granularity in the dataframe columns, if present (separate MultiIndex level) + id: Id, DatapointsQuery or (mixed) sequence of these. See examples. + external_id: External id, DatapointsQuery or (mixed) sequence of these. See examples. 
+ instance_id: Instance id, DatapointsQuery or (mixed) sequence of these. See examples. + start: Inclusive start. Default: 1970-01-01 UTC. + end: Exclusive end. Default: "now" + aggregates: Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) + granularity: The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. + timezone: For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, -day or -month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. + target_unit: The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system: The unit system of the datapoints returned. Cannot be used with target_unit. 
+ limit: Maximum number of datapoints to return for each time series. Default: None (no limit) + include_outside_points: Whether to include outside points. Not allowed when fetching aggregates. Default: False + ignore_unknown_ids: Whether to ignore missing time series rather than raising an exception. Default: False + ignore_bad_datapoints: Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. + treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. + uniform_index: If only querying aggregates AND a single granularity is used (that's NOT a calendar granularity like month/quarter/year) AND no limit is used AND no timezone is used, specifying `uniform_index=True` will return a dataframe with an equidistant datetime index from the earliest `start` to the latest `end` (missing values will be NaNs). If these requirements are not met, a ValueError is raised. Default: False + include_status: Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. Also adds the status info as a separate level in the columns (MultiIndex). 
+ include_unit: Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) + include_aggregate_name: Include aggregate in the dataframe columns, if present (separate MultiIndex level) + include_granularity_name: Include granularity in the dataframe columns, if present (separate MultiIndex level) Returns: - pd.DataFrame: A pandas DataFrame containing the requested time series. The ordering of columns is ids first, then external_ids, and lastly instance_ids. For time series with multiple aggregates, they will be sorted in alphabetical order ("average" before "max"). + A pandas DataFrame containing the requested time series. The ordering of columns is ids first, then external_ids, and lastly instance_ids. For time series with multiple aggregates, they will be sorted in alphabetical order ("average" before "max"). Tip: Pandas DataFrames have one shared index, so when you fetch datapoints from multiple time series, the final index will be @@ -1819,19 +1819,19 @@ async def retrieve_latest( `status codes. `_ Args: - id (int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None): Id or list of ids. - external_id (str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None): External id or list of external ids. - instance_id (NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None): Instance id or list of instance ids. - before (None | int | str | datetime.datetime): Get latest datapoint before this time. Not used when passing 'LatestDatapointQuery'. - target_unit (str | None): The unit_external_id of the datapoint returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. - target_unit_system (str | None): The unit system of the datapoint returned. Cannot be used with target_unit. 
- include_status (bool): Also return the status code, an integer, for each datapoint in the response. - ignore_bad_datapoints (bool): Prevent datapoints with a bad status code to be returned. Default: True. - treat_uncertain_as_bad (bool): Treat uncertain status codes as bad. If false, treat uncertain as good. Default: True. - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids. + external_id: External id or list of external ids. + instance_id: Instance id or list of instance ids. + before: Get latest datapoint before this time. Not used when passing 'LatestDatapointQuery'. + target_unit: The unit_external_id of the datapoint returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system: The unit system of the datapoint returned. Cannot be used with target_unit. + include_status: Also return the status code, an integer, for each datapoint in the response. + ignore_bad_datapoints: Prevent datapoints with a bad status code from being returned. Default: True. + treat_uncertain_as_bad: Treat uncertain status codes as bad. If false, treat uncertain as good. Default: True. + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - Datapoints | DatapointsList | None: A Datapoints object containing the requested data, or a DatapointsList if multiple were requested. If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. + A Datapoints object containing the requested data, or a DatapointsList if multiple were requested. If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. Examples: @@ -1937,10 +1937,10 @@ async def insert( `status codes. 
`_ Args: - datapoints (Datapoints | DatapointsArray | Sequence[dict[str, int | float | str | datetime.datetime]] | Sequence[tuple[int | float | datetime.datetime, int | float | str] | tuple[int | float | datetime.datetime, int | float | str, int]]): The datapoints you wish to insert. Can either be a list of tuples, a list of dictionaries, a Datapoints object or a DatapointsArray object. See examples below. - id (int | None): Id of time series to insert datapoints into. - external_id (str | None): External id of time series to insert datapoint into. - instance_id (NodeId | None): Instance ID of time series to insert datapoints into. + datapoints: The datapoints you wish to insert. Can either be a list of tuples, a list of dictionaries, a Datapoints object or a DatapointsArray object. See examples below. + id: Id of time series to insert datapoints into. + external_id: External id of time series to insert datapoint into. + instance_id: Instance ID of time series to insert datapoints into. Note: All datapoints inserted without a status code (or symbol) is assumed to be good (code 0). To mark a value, pass @@ -2024,7 +2024,7 @@ async def insert_multiple( `status codes. `_ Args: - datapoints (list[dict[str, str | int | list | Datapoints | DatapointsArray | NodeId]]): The datapoints you wish to insert along with the ids of the time series. See examples below. + datapoints: The datapoints you wish to insert along with the ids of the time series. See examples below. Note: All datapoints inserted without a status code (or symbol) is assumed to be good (code 0). To mark a value, pass @@ -2097,11 +2097,11 @@ async def delete_range( """Delete a range of datapoints from a time series. 
Args: - start (int | str | datetime.datetime): Inclusive start of delete range - end (int | str | datetime.datetime): Exclusive end of delete range - id (int | None): Id of time series to delete data from - external_id (str | None): External id of time series to delete data from - instance_id (NodeId | None): Instance ID of time series to delete data from + start: Inclusive start of delete range + end: Exclusive end of delete range + id: Id of time series to delete data from + external_id: External id of time series to delete data from + instance_id: Instance ID of time series to delete data from Examples: @@ -2129,7 +2129,7 @@ async def delete_ranges(self, ranges: list[dict[str, Any]]) -> None: """`Delete a range of datapoints from multiple time series. `_ Args: - ranges (list[dict[str, Any]]): The list of datapoint ids along with time range to delete. See examples below. + ranges: The list of datapoint ids along with time range to delete. See examples below. Examples: @@ -2171,8 +2171,8 @@ async def insert_dataframe(self, df: pd.DataFrame, dropna: bool = True) -> None: The column identifiers must be unique. Args: - df (pd.DataFrame): Pandas DataFrame object containing the time series. - dropna (bool): Set to True to ignore NaNs in the given DataFrame, applied per column. Default: True. + df: Pandas DataFrame object containing the time series. + dropna: Set to True to ignore NaNs in the given DataFrame, applied per column. Default: True. Warning: You can not insert datapoints with status codes using this method (``insert_dataframe``), you'll need diff --git a/cognite/client/_api/datapoints_subscriptions.py b/cognite/client/_api/datapoints_subscriptions.py index b1d7f6eaa4..6dff93f609 100644 --- a/cognite/client/_api/datapoints_subscriptions.py +++ b/cognite/client/_api/datapoints_subscriptions.py @@ -50,11 +50,11 @@ async def __call__( """Iterate over all datapoint subscriptions. 
Args: - chunk_size (int | None): The number of datapoint subscriptions to fetch per request. Defaults to yielding one datapoint subscription at a time. - limit (int | None): Maximum number of items to return. Defaults to return all datapoint subscriptions. + chunk_size: The number of datapoint subscriptions to fetch per request. Defaults to yielding one datapoint subscription at a time. + limit: Maximum number of items to return. Defaults to returning all datapoint subscriptions. Yields: - DatapointSubscription | DatapointSubscriptionList: Yields datapoint subscriptions one by one if chunk is not specified, otherwise returns a list of datapoint subscriptions. + Yields datapoint subscriptions one by one if chunk is not specified, otherwise returns a list of datapoint subscriptions. """ # noqa: DOC404 async for item in self._list_generator( method="GET", @@ -71,10 +71,10 @@ async def create(self, subscription: DataPointSubscriptionWrite) -> DatapointSub Create a subscription that can be used to listen for changes in data points for a set of time series. Args: - subscription (DataPointSubscriptionWrite): Subscription to create. + subscription: Subscription to create. Returns: - DatapointSubscription: Created subscription + Created subscription Examples: @@ -130,8 +130,8 @@ async def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_id """`Delete subscription(s). This operation cannot be undone. `_ Args: - external_id (str | SequenceNotStr[str]): External ID or list of external IDs of subscriptions to delete. - ignore_unknown_ids (bool): Whether to ignore IDs and external IDs that are not found rather than throw an exception. + external_id: External ID or list of external IDs of subscriptions to delete. + ignore_unknown_ids: Whether to ignore IDs and external IDs that are not found rather than throw an exception. 
Examples: @@ -153,10 +153,10 @@ async def retrieve(self, external_id: str) -> DatapointSubscription | None: """`Retrieve one subscription by external ID. `_ Args: - external_id (str): External ID of the subscription to retrieve. + external_id: External ID of the subscription to retrieve. Returns: - DatapointSubscription | None: The requested subscription. + The requested subscription. Examples: @@ -186,11 +186,11 @@ async def list_member_time_series( Retrieve a list of time series (IDs) that the subscription is currently retrieving updates from Args: - external_id (str): External ID of the subscription to retrieve members of. - limit (int | None): Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + external_id: External ID of the subscription to retrieve members of. + limit: Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - TimeSeriesIDList: List of time series in the subscription. + List of time series in the subscription. Examples: @@ -224,11 +224,11 @@ async def update( Furthermore, the subscription partition cannot be changed. Args: - update (DataPointSubscriptionUpdate | DataPointSubscriptionWrite): The subscription update. - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DataPointSubscriptionWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. + update: The subscription update. + mode: How to update data when a non-update object is given (DataPointSubscriptionWrite). 
If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. Returns: - DatapointSubscription: Updated subscription. + Updated subscription. Examples: @@ -279,18 +279,18 @@ async def iterate_data( older than 7 days may be discarded. Args: - external_id (str): The external ID of the subscription. - start (str | None): When to start the iteration. If set to None, the iteration will start from the beginning. The format is "N[timeunit]-ago", where timeunit is w,d,h,m (week, day, hour, minute). For example, "12h-ago" will start the iteration from 12 hours ago. You can also set it to "now" to jump straight to the end. Defaults to None. - limit (int): Approximate number of results to return across all partitions. - partition (int): The partition to iterate over. Defaults to 0. - poll_timeout (int): How many seconds to wait for new data, until an empty response is sent. Defaults to 5. - cursor (str | None): Optional cursor to start iterating from. - include_status (bool): Also return the status code, an integer, for each datapoint in the response. - ignore_bad_datapoints (bool): Do not return bad datapoints. Default: True. - treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Default: True. + external_id: The external ID of the subscription. + start: When to start the iteration. If set to None, the iteration will start from the beginning. The format is "N[timeunit]-ago", where timeunit is w,d,h,m (week, day, hour, minute). For example, "12h-ago" will start the iteration from 12 hours ago. You can also set it to "now" to jump straight to the end. Defaults to None. 
+ limit: Approximate number of results to return across all partitions. + partition: The partition to iterate over. Defaults to 0. + poll_timeout: How many seconds to wait for new data, until an empty response is sent. Defaults to 5. + cursor: Optional cursor to start iterating from. + include_status: Also return the status code, an integer, for each datapoint in the response. + ignore_bad_datapoints: Do not return bad datapoints. Default: True. + treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Default: True. Yields: - DatapointSubscriptionBatch: Changes to the subscription and data in the subscribed time series. + Changes to the subscription and data in the subscribed time series. Examples: @@ -347,9 +347,9 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DatapointSubscri """`List data point subscriptions `_ Args: - limit (int | None): Maximum number of subscriptions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of subscriptions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - DatapointSubscriptionList: List of requested datapoint subscriptions + List of requested datapoint subscriptions Examples: diff --git a/cognite/client/_api/diagrams.py b/cognite/client/_api/diagrams.py index 4751a78315..c9d11c71bb 100644 --- a/cognite/client/_api/diagrams.py +++ b/cognite/client/_api/diagrams.py @@ -137,19 +137,19 @@ async def detect( are able to access the data sent to this endpoint. Args: - entities (Sequence[dict | CogniteResource]): List of entities to detect - search_field (str): If entities is a list of dictionaries, this is the key to the values to detect in the PnId - partial_match (bool): Allow for a partial match (e.g. missing prefix). 
- min_tokens (int): Minimal number of tokens a match must be based on - file_ids (int | Sequence[int] | None): ID of the files, should already be uploaded in the same tenant. - file_external_ids (str | SequenceNotStr[str] | None): File external ids, alternative to file_ids and file_references. - file_instance_ids (NodeId | Sequence[NodeId] | None): Files to detect in, specified by instance id. - file_references (list[FileReference] | FileReference | None): File references (id, external_id or instance_id), and first_page and last_page to specify page ranges per file. Each reference can specify up to 50 pages. Providing a page range will also make the page count of the document a part of the response. - pattern_mode (bool | None): If True, entities must be provided with a sample field. This enables detecting tags that are similar to the sample, but not necessarily identical. Defaults to None. - configuration (DiagramDetectConfig | None): Additional configuration for the detect algorithm. See `DiagramDetectConfig` class documentation and `beta API docs `_. - multiple_jobs (bool): Enables you to publish multiple jobs. If True the method returns a tuple of DetectJobBundle and list of potentially unposted files. If False it will return a single DiagramDetectResults. Defaults to False. + entities: List of entities to detect + search_field: If entities is a list of dictionaries, this is the key to the values to detect in the PnId + partial_match: Allow for a partial match (e.g. missing prefix). + min_tokens: Minimal number of tokens a match must be based on + file_ids: ID of the files, should already be uploaded in the same tenant. + file_external_ids: File external ids, alternative to file_ids and file_references. + file_instance_ids: Files to detect in, specified by instance id. + file_references: File references (id, external_id or instance_id), and first_page and last_page to specify page ranges per file. Each reference can specify up to 50 pages. 
Providing a page range will also make the page count of the document a part of the response. + pattern_mode: If True, entities must be provided with a sample field. This enables detecting tags that are similar to the sample, but not necessarily identical. Defaults to None. + configuration: Additional configuration for the detect algorithm. See `DiagramDetectConfig` class documentation and `beta API docs `_. + multiple_jobs: Enables you to publish multiple jobs. If True the method returns a tuple of DetectJobBundle and list of potentially unposted files. If False it will return a single DiagramDetectResults. Defaults to False. Returns: - DiagramDetectResults | tuple[DetectJobBundle, list[dict[str, Any]]]: Resulting queued job or a bundle of jobs and a list of unposted files. Note that the .result property of the job or job bundle will block waiting for results. + Resulting queued job or a bundle of jobs and a list of unposted files. Note that the .result property of the job or job bundle will block waiting for results. Note: The results are not written to CDF, to create annotations based on detected entities use `AnnotationsAPI`. @@ -319,10 +319,10 @@ async def convert(self, detect_job: DiagramDetectResults) -> DiagramConvertResul Will automatically wait for the detect job to complete before starting the conversion. Args: - detect_job (DiagramDetectResults): detect job + detect_job: detect job Returns: - DiagramConvertResults: Resulting queued job. + Resulting queued job. Examples: diff --git a/cognite/client/_api/document_preview.py b/cognite/client/_api/document_preview.py index d86ab11d44..2e1b88d0bf 100644 --- a/cognite/client/_api/document_preview.py +++ b/cognite/client/_api/document_preview.py @@ -14,11 +14,11 @@ async def download_page_as_png_bytes(self, id: int, page_number: int = 1) -> byt """`Downloads an image preview for a specific page of the specified document. 
`_ Args: - id (int): The server-generated ID for the document you want to retrieve the preview of. - page_number (int): Page number to preview. Starting at 1 for first page. + id: The server-generated ID for the document you want to retrieve the preview of. + page_number: Page number to preview. Starting at 1 for first page. Returns: - bytes: The png preview of the document. + The png preview of the document. Examples: @@ -48,10 +48,10 @@ async def download_page_as_png( """`Downloads an image preview for a specific page of the specified document. `_ Args: - path (Path | str | IO): The path to save the png preview of the document. If the path is a directory, the file name will be '[id]_page[page_number].png'. - id (int): The server-generated ID for the document you want to retrieve the preview of. - page_number (int): Page number to preview. Starting at 1 for first page. - overwrite (bool): Whether to overwrite existing file at the given path. Defaults to False. + path: The path to save the png preview of the document. If the path is a directory, the file name will be '[id]_page[page_number].png'. + id: The server-generated ID for the document you want to retrieve the preview of. + page_number: Page number to preview. Starting at 1 for first page. + overwrite: Whether to overwrite existing file at the given path. Defaults to False. Examples: @@ -84,10 +84,10 @@ async def download_document_as_pdf_bytes(self, id: int) -> bytes: Previews will be rendered if necessary during the request. Be prepared for the request to take a few seconds to complete. Args: - id (int): The server-generated ID for the document you want to retrieve the preview of. + id: The server-generated ID for the document you want to retrieve the preview of. Returns: - bytes: The pdf preview of the document. + The pdf preview of the document. 
Examples: @@ -111,9 +111,9 @@ async def download_document_as_pdf(self, path: Path | str | IO, id: int, overwri Previews will be rendered if necessary during the request. Be prepared for the request to take a few seconds to complete. Args: - path (Path | str | IO): The path to save the pdf preview of the document. If the path is a directory, the file name will be '[id].pdf'. - id (int): The server-generated ID for the document you want to retrieve the preview of. - overwrite (bool): Whether to overwrite existing file at the given path. Defaults to False. + path: The path to save the pdf preview of the document. If the path is a directory, the file name will be '[id].pdf'. + id: The server-generated ID for the document you want to retrieve the preview of. + overwrite: Whether to overwrite existing file at the given path. Defaults to False. Examples: @@ -144,10 +144,10 @@ async def retrieve_pdf_link(self, id: int) -> TemporaryLink: """`Retrieve a Temporary link to download pdf preview `_ Args: - id (int): The server-generated ID for the document you want to retrieve the preview of. + id: The server-generated ID for the document you want to retrieve the preview of. Returns: - TemporaryLink: A temporary link to download the pdf preview. + A temporary link to download the pdf preview. Examples: diff --git a/cognite/client/_api/documents.py b/cognite/client/_api/documents.py index 16396c615d..9aeb8f1e45 100644 --- a/cognite/client/_api/documents.py +++ b/cognite/client/_api/documents.py @@ -66,13 +66,13 @@ async def __call__( Fetches documents as they are iterated over, so you keep a limited number of documents in memory. Args: - chunk_size (int | None): Number of documents to return in each chunk. Defaults to yielding one document at a time. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to return. - sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. 
The default order is ascending. - limit (int | None): Maximum number of documents to return. Default to return all items. + chunk_size: Number of documents to return in each chunk. Defaults to yielding one document at a time. + filter: The filter to narrow down the documents to return. + sort: The property to sort by. The default order is ascending. + limit: Maximum number of documents to return. Default to return all items. Yields: - Document | DocumentList: yields Documents one by one if chunk_size is not specified, else DocumentList objects. + yields Documents one by one if chunk_size is not specified, else DocumentList objects. """ # noqa: DOC404 self._validate_filter(filter) async for item in self._list_generator( @@ -90,11 +90,11 @@ async def aggregate_count(self, query: str | None = None, filter: Filter | dict[ """`Count of documents matching the specified filters and search. `_ Args: - query (str | None): The free text search query, for details see the documentation referenced above. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count. + query: The free text search query, for details see the documentation referenced above. + filter: The filter to narrow down the documents to count. Returns: - int: The number of documents matching the specified filters and search. + The number of documents matching the specified filters and search. Examples: @@ -137,13 +137,13 @@ async def aggregate_cardinality_values( """`Find approximate property count for documents. `_ Args: - property (DocumentProperty | SourceFileProperty | list[str] | str): The property to count the cardinality of. - query (str | None): The free text search query, for details see the documentation referenced above. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. 
+ property: The property to count the cardinality of. + query: The free text search query, for details see the documentation referenced above. + filter: The filter to narrow down the documents to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. Returns: - int: The number of documents matching the specified filters and search. + The number of documents matching the specified filters and search. Examples: @@ -190,13 +190,13 @@ async def aggregate_cardinality_properties( """`Find approximate paths count for documents. `_ Args: - path (SourceFileProperty | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["sourceFile", "metadata"]. It means to aggregate only metadata properties (aka keys). - query (str | None): The free text search query, for details see the documentation referenced above. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + path: The scope in every document to aggregate properties. The only value allowed now is ["sourceFile", "metadata"]. It means to aggregate only metadata properties (aka keys). + query: The free text search query, for details see the documentation referenced above. + filter: The filter to narrow down the documents to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. Returns: - int: The number of documents matching the specified filters and search. + The number of documents matching the specified filters and search. Examples: @@ -228,14 +228,14 @@ async def aggregate_unique_values( """`Get unique properties with counts for documents. `_ Args: - property (DocumentProperty | SourceFileProperty | list[str] | str): The property to group by. - query (str | None): The free text search query, for details see the documentation referenced above. 
- filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - limit (int): Maximum number of items. Defaults to 25. + property: The property to group by. + query: The free text search query, for details see the documentation referenced above. + filter: The filter to narrow down the documents to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + limit: Maximum number of items. Defaults to 25. Returns: - UniqueResultList: List of unique values of documents matching the specified filters and search. + List of unique values of documents matching the specified filters and search. Examples: @@ -286,14 +286,14 @@ async def aggregate_unique_properties( """`Get unique paths with counts for documents. `_ Args: - path (DocumentProperty | SourceFileProperty | list[str] | str): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). - query (str | None): The free text search query, for details see the documentation referenced above. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - limit (int): Maximum number of items. Defaults to 25. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + query: The free text search query, for details see the documentation referenced above. + filter: The filter to narrow down the documents to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + limit: Maximum number of items. Defaults to 25. 
Returns: - UniqueResultList: List of unique values of documents matching the specified filters and search. + List of unique values of documents matching the specified filters and search. Examples: @@ -334,12 +334,12 @@ async def retrieve_content( you can use this endpoint. Args: - id (int | None): The server-generated ID for the document you want to retrieve the content of. - external_id (str | None): External ID of the document. - instance_id (NodeId | None): Instance ID of the document. + id: The server-generated ID for the document you want to retrieve the content of. + external_id: External ID of the document. + instance_id: Instance ID of the document. Returns: - bytes: The content of the document. + The content of the document. Examples: @@ -386,10 +386,10 @@ async def retrieve_content_buffer( you can use this endpoint. Args: - buffer (BinaryIO): The document content is streamed directly into the buffer. This is useful for retrieving large documents. - id (int | None): The server-generated ID for the document you want to retrieve the content of. - external_id (str | None): External ID of the document. - instance_id (NodeId | None): Instance ID of the document. + buffer: The document content is streamed directly into the buffer. This is useful for retrieving large documents. + id: The server-generated ID for the document you want to retrieve the content of. + external_id: External ID of the document. + instance_id: Instance ID of the document. Examples: @@ -457,14 +457,14 @@ async def search( endpoint documentation referenced above. Args: - query (str): The free text search query. - highlight (bool): Whether or not matches in search results should be highlighted. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to search. - sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending. - limit (int): Maximum number of items to return. 
When using highlights, the maximum value is reduced to 20. Defaults to 25. + query: The free text search query. + highlight: Whether or not matches in search results should be highlighted. + filter: The filter to narrow down the documents to search. + sort: The property to sort by. The default order is ascending. + limit: Maximum number of items to return. When using highlights, the maximum value is reduced to 20. Defaults to 25. Returns: - DocumentList | DocumentHighlightList: List of search results. If highlight is True, a DocumentHighlightList is returned, otherwise a DocumentList is returned. + List of search results. If highlight is True, a DocumentHighlightList is returned, otherwise a DocumentList is returned. Examples: @@ -526,12 +526,12 @@ async def list( project. Args: - filter (Filter | dict[str, Any] | None): Filter | dict[str, Any] | None): The filter to narrow down the documents to return. - sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending. - limit (int | None): Maximum number of documents to return. Defaults to 25. Set to None or -1 to return all documents. + filter: The filter to narrow down the documents to return. + sort: The property to sort by. The default order is ascending. + limit: Maximum number of documents to return. Defaults to 25. Set to None or -1 to return all documents. Returns: - DocumentList: List of documents + List of documents Examples: diff --git a/cognite/client/_api/entity_matching.py b/cognite/client/_api/entity_matching.py index 454e503b6b..ac3ffd881e 100644 --- a/cognite/client/_api/entity_matching.py +++ b/cognite/client/_api/entity_matching.py @@ -29,11 +29,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None) """`Retrieve model `_ Args: - id (int | None): id of the model to retrieve. - external_id (str | None): external id of the model to retrieve. + id: id of the model to retrieve. 
+ external_id: external id of the model to retrieve. Returns: - EntityMatchingModel | None: Model requested. + Model requested. Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient @@ -53,11 +53,11 @@ async def retrieve_multiple( """`Retrieve models `_ Args: - ids (Sequence[int] | None): ids of the model to retrieve. - external_ids (SequenceNotStr[str] | None): external ids of the model to retrieve. + ids: ids of the model to retrieve. + external_ids: external ids of the model to retrieve. Returns: - EntityMatchingModelList: Models requested. + Models requested. Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient @@ -81,11 +81,11 @@ async def update( """`Update model `_ Args: - item (EntityMatchingModel | EntityMatchingModelUpdate | Sequence[EntityMatchingModel | EntityMatchingModelUpdate]): Model(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (EntityMatchingModel). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Model(s) to update + mode: How to update data when a non-update object is given (EntityMatchingModel). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - EntityMatchingModelList | EntityMatchingModel: No description. + No description. 
Examples: >>> from cognite.client.data_classes.contextualization import EntityMatchingModelUpdate @@ -114,15 +114,15 @@ async def list( """`List models `_ Args: - name (str | None): Optional user-defined name of model. - description (str | None): Optional user-defined description of model. - original_id (int | None): id of the original model for models that were created with refit. - feature_type (str | None): feature type that defines the combination of features used. - classifier (str | None): classifier used in training. - limit (int | None): Maximum number of items to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + name: Optional user-defined name of model. + description: Optional user-defined description of model. + original_id: id of the original model for models that were created with refit. + feature_type: feature type that defines the combination of features used. + classifier: classifier used in training. + limit: Maximum number of items to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - EntityMatchingModelList: List of models. + List of models. Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient @@ -156,7 +156,7 @@ async def list_jobs(self) -> ContextualizationJobList: """List jobs, typically model fit and predict runs. Returns: - ContextualizationJobList: List of jobs. + List of jobs. 
""" return ContextualizationJobList._load( unpack_items(await self._get(self._RESOURCE_PATH + "/jobs", semaphore=self._get_semaphore("read"))) @@ -171,8 +171,8 @@ async def delete( Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + id: Id or list of ids + external_id: External ID or list of external ids Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient >>> client = CogniteClient() @@ -206,18 +206,18 @@ async def fit( capabilities in the project, are able to access the data sent to this endpoint. Args: - sources (Sequence[dict | CogniteResource]): entities to match from, should have an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). Metadata fields are automatically flattened to "metadata.key" entries, such that they can be used in match_fields. - targets (Sequence[dict | CogniteResource]): entities to match to, should have an 'id' field. Tolerant to passing more than is needed or used. - true_matches (Sequence[dict | tuple[int | str, int | str]] | None): Known valid matches given as a list of dicts with keys 'sourceId', 'sourceExternalId', 'targetId', 'targetExternalId'). If omitted, uses an unsupervised model. A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. - match_fields (dict | Sequence[tuple[str, str]] | None): List of (from,to) keys to use in matching. Default in the API is [('name','name')]. Also accepts {"source": .., "target": ..}. - feature_type (str | None): feature type that defines the combination of features used, see API docs for details. - classifier (str | None): classifier used in training. - ignore_missing_fields (bool): whether missing data in match_fields should return error or be filled in with an empty string. - name (str | None): Optional user-defined name of model. 
- description (str | None): Optional user-defined description of model. - external_id (str | None): Optional external id. Must be unique within the project. + sources: entities to match from, should have an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). Metadata fields are automatically flattened to "metadata.key" entries, such that they can be used in match_fields. + targets: entities to match to, should have an 'id' field. Tolerant to passing more than is needed or used. + true_matches: Known valid matches given as a list of dicts with keys 'sourceId', 'sourceExternalId', 'targetId', 'targetExternalId'. If omitted, uses an unsupervised model. A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. + match_fields: List of (from,to) keys to use in matching. Default in the API is [('name','name')]. Also accepts {"source": .., "target": ..}. + feature_type: feature type that defines the combination of features used, see API docs for details. + classifier: classifier used in training. + ignore_missing_fields: whether missing data in match_fields should return error or be filled in with an empty string. + name: Optional user-defined name of model. + description: Optional user-defined description of model. + external_id: Optional external id. Must be unique within the project. Returns: - EntityMatchingModel: Resulting queued model. + Resulting queued model. Example: >>> from cognite.client import CogniteClient, AsyncCogniteClient @@ -279,15 +279,15 @@ async def predict( capabilities in the project, are able to access the data sent to this endpoint. Args: - sources (Sequence[dict] | None): entities to match from, does not need an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). If omitted, will use data from fit. - targets (Sequence[dict] | None): entities to match to, does not need an 'id' field.
Tolerant to passing more than is needed or used. If omitted, will use data from fit. - num_matches (int): number of matches to return for each item. - score_threshold (float | None): only return matches with a score above this threshold - id (int | None): id of the model to use. - external_id (str | None): external id of the model to use. + sources: entities to match from, does not need an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). If omitted, will use data from fit. + targets: entities to match to, does not need an 'id' field. Tolerant to passing more than is needed or used. If omitted, will use data from fit. + num_matches: number of matches to return for each item. + score_threshold: only return matches with a score above this threshold. + id: id of the model to use. + external_id: external id of the model to use. Returns: - EntityMatchingPredictionResult: object which can be used to wait for and retrieve results. + object which can be used to wait for and retrieve results. Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient @@ -334,11 +334,11 @@ async def refit( capabilities in the project, are able to access the data sent to this endpoint. Args: - true_matches (Sequence[dict | tuple[int | str, int | str]]): Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'). A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. - id (int | None): id of the model to use. - external_id (str | None): external id of the model to use. + true_matches: Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'. A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. + id: id of the model to use. + external_id: external id of the model to use.
Returns: - EntityMatchingModel: new model refitted to true_matches. + new model refitted to true_matches. Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient diff --git a/cognite/client/_api/events.py b/cognite/client/_api/events.py index 9f5873c3be..642a910e76 100644 --- a/cognite/client/_api/events.py +++ b/cognite/client/_api/events.py @@ -113,29 +113,29 @@ async def __call__( Fetches events as they are iterated over, so you keep a limited number of events in memory. Args: - chunk_size (int | None): Number of events to return in each chunk. Defaults to yielding one event a time. - start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - end_time (dict[str, Any] | EndTimeFilter | None): Range between two timestamps - active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. - type (str | None): Type of the event, e.g 'failure'. - subtype (str | None): Subtype of the event, e.g 'electrical'. - metadata (dict[str, str] | None): Customizable extra data about the event. String key -> String value. - asset_ids (Sequence[int] | None): Asset IDs of related equipments that this event relates to. - asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this event relates to. - asset_subtree_ids (int | Sequence[int] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. 
- asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only events in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only events in the specified data set(s) with this external id / these external ids. - source (str | None): The source of this event. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - external_id_prefix (str | None): External Id provided by client. Should be unique within the project - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. - limit (int | None): Maximum number of events to return. Defaults to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + chunk_size: Number of events to return in each chunk. Defaults to yielding one event a time. + start_time: Range between two timestamps + end_time: Range between two timestamps + active_at_time: Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. 
activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. + type: Type of the event, e.g. 'failure'. + subtype: Subtype of the event, e.g. 'electrical'. + metadata: Customizable extra data about the event. String key -> String value. + asset_ids: Asset IDs of related equipment that this event relates to. + asset_external_ids: Asset External IDs of related equipment that this event relates to. + asset_subtree_ids: Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only events in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only events in the specified data set(s) with this external id / these external ids. + source: The source of this event. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + external_id_prefix: External ID provided by client. Should be unique within the project. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + limit: Maximum number of events to return. Defaults to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not.
Yields: - Event | EventList: yields Event one by one if chunk_size is not specified, else EventList objects. + yields Event one by one if chunk_size is not specified, else EventList objects. """ # noqa: DOC404 asset_subtree_ids_processed = process_asset_subtree_ids(asset_subtree_ids, asset_subtree_external_ids) data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids) @@ -176,11 +176,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None) """`Retrieve a single event by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - Event | None: Requested event or None if it does not exist. + Requested event or None if it does not exist. Examples: @@ -207,12 +207,12 @@ async def retrieve_multiple( """`Retrieve multiple events by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - EventList: The requested events. + The requested events. Examples: @@ -242,13 +242,13 @@ async def aggregate_unique_values( """`Get unique properties with counts for events. `_ Args: - filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. - property (EventPropertyLike | None): The property name(s) to apply the aggregation on. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to consider. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter: The filter to narrow down the events to count requiring exact match. + property: The property name(s) to apply the aggregation on. 
+ advanced_filter: The filter to narrow down the events to consider. + aggregate_filter: The filter to apply to the resulting buckets. Returns: - UniqueResultList: List of unique values of events matching the specified filters and search. + List of unique values of events matching the specified filters and search. Examples: @@ -301,13 +301,12 @@ async def aggregate_count( """`Count of event matching the specified filters. `_ Args: - property (EventPropertyLike | None): If specified, Get an approximate number of Events with a specific property - (property is not null) and matching the filters. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count. - filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + property: If specified, Get an approximate number of Events with a specific property (property is not null) and matching the filters. + advanced_filter: The filter to narrow down the events to count. + filter: The filter to narrow down the events to count requiring exact match. Returns: - int: The number of events matching the specified filters and search. + The number of events matching the specified filters and search. Examples: @@ -343,12 +342,12 @@ async def aggregate_cardinality_values( """`Find approximate property count for events. `_ Args: - property (EventPropertyLike): The property to count the cardinality of. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + property: The property to count the cardinality of. + advanced_filter: The filter to narrow down the events to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. 
+ filter: The filter to narrow down the events to count requiring exact match. Returns: - int: The number of properties matching the specified filter. + The number of properties matching the specified filter. Examples: @@ -388,13 +387,12 @@ async def aggregate_cardinality_properties( """`Find approximate paths count for events. `_ Args: - path (EventPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. - It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the events to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the events to count requiring exact match. Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -426,14 +424,13 @@ async def aggregate_unique_properties( """`Get unique paths with counts for events. `_ Args: - path (EventPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. - It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. 
- filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the events to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the events to count requiring exact match. Returns: - UniqueResultList: List of unique values of events matching the specified filters and search. + List of unique values of events matching the specified filters and search. Examples: @@ -465,10 +462,10 @@ async def create(self, event: Event | EventWrite | Sequence[Event] | Sequence[Ev """`Create one or more events. `_ Args: - event (Event | EventWrite | Sequence[Event] | Sequence[EventWrite]): Event or list of events to create. + event: Event or list of events to create. Returns: - Event | EventList: Created event(s) + Created event(s) Examples: @@ -494,9 +491,9 @@ async def delete( """`Delete one or more events `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: External ID or list of external ids + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -535,11 +532,11 @@ async def update( """`Update one or more events `_ Args: - item (Event | EventWrite | EventUpdate | Sequence[Event | EventWrite | EventUpdate]): Event(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Event or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). 
Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Event(s) to update + mode: How to update data when a non-update object is given (Event or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Event | EventList: Updated event(s) + Updated event(s) Examples: @@ -572,12 +569,12 @@ async def search( Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. Args: - description (str | None): Fuzzy match on description. - filter (EventFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. - limit (int): Maximum number of results to return. + description: Fuzzy match on description. + filter: Filter to apply. Performs exact match on these fields. + limit: Maximum number of results to return. Returns: - EventList: List of requested events + List of requested events Examples: @@ -610,11 +607,11 @@ async def upsert( For more details, see :ref:`appendix-upsert`. Args: - item (Event | EventWrite | Sequence[Event | EventWrite]): Event or list of events to upsert. - mode (Literal['patch', 'replace']): Whether to patch or replace in the case the events are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. 
+ item: Event or list of events to upsert. + mode: Whether to patch or replace in the case the events are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. Returns: - Event | EventList: The upserted event(s). + The upserted event(s). Examples: @@ -667,29 +664,29 @@ async def list( """`List events `_ Args: - start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - end_time (dict[str, Any] | EndTimeFilter | None): Range between two timestamps. - active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. - type (str | None): Type of the event, e.g 'failure'. - subtype (str | None): Subtype of the event, e.g 'electrical'. - metadata (dict[str, str] | None): Customizable extra data about the event. String key -> String value. - asset_ids (Sequence[int] | None): Asset IDs of related equipments that this event relates to. - asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this event relates to. - asset_subtree_ids (int | Sequence[int] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. 
- data_set_ids (int | Sequence[int] | None): Return only events in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only events in the specified data set(s) with this external id / these external ids. - source (str | None): The source of this event. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - external_id_prefix (str | None): External Id provided by client. Should be unique within the project. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. - partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). - limit (int | None): Maximum number of events to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + start_time: Range between two timestamps. + end_time: Range between two timestamps. + active_at_time: Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. 
+ type: Type of the event, e.g. 'failure'. + subtype: Subtype of the event, e.g. 'electrical'. + metadata: Customizable extra data about the event. String key -> String value. + asset_ids: Asset IDs of related equipment that this event relates to. + asset_external_ids: Asset External IDs of related equipment that this event relates to. + asset_subtree_ids: Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only events in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only events in the specified data set(s) with this external id / these external ids. + source: The source of this event. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + external_id_prefix: External Id provided by client. Should be unique within the project. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + limit: Maximum number of events to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. 
See examples below for usage. Returns: - EventList: List of requested events + List of requested events .. note:: When using `partitions`, there are few considerations to keep in mind: diff --git a/cognite/client/_api/extractionpipelines/__init__.py b/cognite/client/_api/extractionpipelines/__init__.py index 24ea0ab9f8..12213f6971 100644 --- a/cognite/client/_api/extractionpipelines/__init__.py +++ b/cognite/client/_api/extractionpipelines/__init__.py @@ -48,11 +48,11 @@ async def __call__( """Iterate over extraction pipelines Args: - chunk_size (int | None): Number of extraction pipelines to yield per chunk. Defaults to yielding extraction pipelines one by one. - limit (int | None): Limits the number of results to be returned. Defaults to yielding all extraction pipelines. + chunk_size: Number of extraction pipelines to yield per chunk. Defaults to yielding extraction pipelines one by one. + limit: Limits the number of results to be returned. Defaults to yielding all extraction pipelines. Yields: - ExtractionPipeline | ExtractionPipelineList: Yields extraction pipelines one by one or in chunks up to the chunk size. + Yields extraction pipelines one by one or in chunks up to the chunk size. """ # noqa: DOC404 async for item in self._list_generator( method="GET", @@ -67,11 +67,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None) """`Retrieve a single extraction pipeline by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - ExtractionPipeline | None: Requested extraction pipeline or None if it does not exist. + Requested extraction pipeline or None if it does not exist. Examples: @@ -101,12 +101,12 @@ async def retrieve_multiple( """`Retrieve multiple extraction pipelines by ids and external ids. 
`_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - ExtractionPipelineList: The requested ExtractionPipelines. + The requested ExtractionPipelines. Examples: @@ -133,10 +133,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> ExtractionPipeli """`List extraction pipelines `_ Args: - limit (int | None): Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ExtractionPipelineList: List of requested ExtractionPipelines + List of requested ExtractionPipelines Examples: @@ -172,10 +172,10 @@ async def create( You can create an arbitrary number of extraction pipelines, and the SDK will split the request into multiple requests if necessary. Args: - extraction_pipeline (ExtractionPipeline | ExtractionPipelineWrite | Sequence[ExtractionPipeline] | Sequence[ExtractionPipelineWrite]): Extraction pipeline or list of extraction pipelines to create. + extraction_pipeline: Extraction pipeline or list of extraction pipelines to create. 
Returns: - ExtractionPipeline | ExtractionPipelineList: Created extraction pipeline(s) + Created extraction pipeline(s) Examples: @@ -203,8 +203,8 @@ async def delete( """`Delete one or more extraction pipelines `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + id: Id or list of ids + external_id: External ID or list of external ids Examples: @@ -240,11 +240,11 @@ async def update( """`Update one or more extraction pipelines `_ Args: - item (ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate | Sequence[ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate]): Extraction pipeline(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ExtractionPipeline or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Extraction pipeline(s) to update + mode: How to update data when a non-update object is given (ExtractionPipeline or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. 
Returns: - ExtractionPipeline | ExtractionPipelineList: Updated extraction pipeline(s) + Updated extraction pipeline(s) Examples: diff --git a/cognite/client/_api/extractionpipelines/configs.py b/cognite/client/_api/extractionpipelines/configs.py index f2c0c24875..bd79d0e278 100644 --- a/cognite/client/_api/extractionpipelines/configs.py +++ b/cognite/client/_api/extractionpipelines/configs.py @@ -20,12 +20,12 @@ async def retrieve( By default the latest configuration revision is retrieved, or you can specify a timestamp or a revision number. Args: - external_id (str): External id of the extraction pipeline to retrieve config from. - revision (int | None): Optionally specify a revision number to retrieve. - active_at_time (int | None): Optionally specify a timestamp the configuration revision should be active. + external_id: External id of the extraction pipeline to retrieve config from. + revision: Optionally specify a revision number to retrieve. + active_at_time: Optionally specify a timestamp the configuration revision should be active. Returns: - ExtractionPipelineConfig: Retrieved extraction pipeline configuration revision + Retrieved extraction pipeline configuration revision Examples: @@ -47,10 +47,10 @@ async def list(self, external_id: str) -> ExtractionPipelineConfigRevisionList: """`Retrieve all configuration revisions from an extraction pipeline ` Args: - external_id (str): External id of the extraction pipeline to retrieve config from. + external_id: External id of the extraction pipeline to retrieve config from. Returns: - ExtractionPipelineConfigRevisionList: Retrieved extraction pipeline configuration revisions + Retrieved extraction pipeline configuration revisions Examples: @@ -74,10 +74,10 @@ async def create( """`Create a new configuration revision ` Args: - config (ExtractionPipelineConfig | ExtractionPipelineConfigWrite): Configuration revision to create. + config: Configuration revision to create. 
Returns: - ExtractionPipelineConfig: Created extraction pipeline configuration revision + Created extraction pipeline configuration revision Examples: @@ -99,11 +99,11 @@ async def revert(self, external_id: str, revision: int) -> ExtractionPipelineCon """`Revert to a previous configuration revision ` Args: - external_id (str): External id of the extraction pipeline to revert revision for. - revision (int): Revision to revert to. + external_id: External id of the extraction pipeline to revert revision for. + revision: Revision to revert to. Returns: - ExtractionPipelineConfig: New latest extraction pipeline configuration revision. + New latest extraction pipeline configuration revision. Examples: diff --git a/cognite/client/_api/extractionpipelines/runs.py b/cognite/client/_api/extractionpipelines/runs.py index a4e2efcfa7..12e6e56af1 100644 --- a/cognite/client/_api/extractionpipelines/runs.py +++ b/cognite/client/_api/extractionpipelines/runs.py @@ -37,15 +37,14 @@ async def list( """`List runs for an extraction pipeline with given external_id `_ Args: - external_id (str): Extraction pipeline external Id. - statuses (RunStatus | Sequence[RunStatus] | SequenceNotStr[str] | None): One or more among "success" / "failure" / "seen". - message_substring (str | None): Failure message part. - created_time (dict[str, Any] | TimestampRange | str | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as timestamps in ms. - If a string is passed, it is assumed to be the minimum value. - limit (int | None): Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + external_id: Extraction pipeline external Id. + statuses: One or more among "success" / "failure" / "seen". + message_substring: Failure message part. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as timestamps in ms. 
If a string is passed, it is assumed to be the minimum value. + limit: Maximum number of extraction pipeline runs to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ExtractionPipelineRunList: List of requested extraction pipeline runs + List of requested extraction pipeline runs Tip: The ``created_time`` parameter can also be passed as a string, to support the most typical usage pattern @@ -116,10 +115,10 @@ async def create( You can create an arbitrary number of extraction pipeline runs, and the SDK will split the request into multiple requests. Args: - run (ExtractionPipelineRun | ExtractionPipelineRunWrite | Sequence[ExtractionPipelineRun] | Sequence[ExtractionPipelineRunWrite]): ExtractionPipelineRun| ExtractionPipelineRunWrite | Sequence[ExtractionPipelineRun] | Sequence[ExtractionPipelineRunWrite]): Extraction pipeline or list of extraction pipeline runs to create. + run: Extraction pipeline run or list of extraction pipeline runs to create. Returns: - ExtractionPipelineRun | ExtractionPipelineRunList: Created extraction pipeline run(s) + Created extraction pipeline run(s) Examples: diff --git a/cognite/client/_api/files.py b/cognite/client/_api/files.py index 2d1f853db9..bffc478a5e 100644 --- a/cognite/client/_api/files.py +++ b/cognite/client/_api/files.py @@ -120,31 +120,31 @@ async def __call__( Fetches file metadata objects as they are iterated over, so you keep a limited number of metadata objects in memory. Args: - chunk_size (int | None): Number of files to return in each chunk. Defaults to yielding one event a time. - name (str | None): Name of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf, .. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value - asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs. - asset_external_ids (SequenceNotStr[str] | None): No description. 
- asset_subtree_ids (int | Sequence[int] | None): Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only files in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only files in the specified data set(s) with this external id / these external ids. - labels (LabelFilter | None): Return only the files matching the specified label(s). - geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. - source (str | None): The source of this event. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range. - source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range. - uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - external_id_prefix (str | None): External Id provided by client. Should be unique within the project. - directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client. 
- uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. - limit (int | None): Maximum number of files to return. Defaults to return all items. + chunk_size: Number of files to return in each chunk. Defaults to yielding one file at a time. + name: Name of the file. + mime_type: File type. E.g. text/plain, application/pdf, .. + metadata: Custom, application specific metadata. String key -> String value + asset_ids: Only include files that reference these specific asset IDs. + asset_external_ids: No description. + asset_subtree_ids: Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only files in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only files in the specified data set(s) with this external id / these external ids. + labels: Return only the files matching the specified label(s). + geo_location: Only include files matching the specified geographic relation. + source: The source of this file. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + source_created_time: Filter for files where the sourceCreatedTime field has been set and is within the specified range. + source_modified_time: Filter for files where the sourceModifiedTime field has been set and is within the specified range. 
+ uploaded_time: Range between two timestamps + external_id_prefix: External Id provided by client. Should be unique within the project. + directory_prefix: Filter by this (case-sensitive) prefix for the directory provided by the client. + uploaded: Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. + limit: Maximum number of files to return. Defaults to return all items. Yields: - FileMetadata | FileMetadataList: yields FileMetadata one by one if chunk_size is not specified, else FileMetadataList objects. + yields FileMetadata one by one if chunk_size is not specified, else FileMetadataList objects. """ # noqa: DOC404 asset_subtree_ids_processed = process_asset_subtree_ids(asset_subtree_ids, asset_subtree_external_ids) data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids) @@ -186,11 +186,11 @@ async def create( """Create file without uploading content. Args: - file_metadata (FileMetadata | FileMetadataWrite): File metadata for the file to create. - overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + file_metadata: File metadata for the file to create. + overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. 
Do not set assetIds in request body if you want to keep the current file-asset mappings. Returns: - tuple[FileMetadata, str]: Tuple containing the file metadata and upload url of the created file. + Tuple containing the file metadata and upload url of the created file. Examples: @@ -223,12 +223,12 @@ async def retrieve( """`Retrieve a single file metadata by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID - instance_id (NodeId | None): Instance ID + id: ID + external_id: External ID + instance_id: Instance ID Returns: - FileMetadata | None: Requested file metadata or None if it does not exist. + Requested file metadata or None if it does not exist. Examples: @@ -258,13 +258,13 @@ async def retrieve_multiple( """`Retrieve multiple file metadatas by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - instance_ids (Sequence[NodeId] | None): Instance IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + instance_ids: Instance IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - FileMetadataList: The requested file metadatas. + The requested file metadatas. Examples: @@ -291,10 +291,10 @@ async def aggregate_count(self, filter: FileMetadataFilter | dict[str, Any] | No """`Aggregate files `_ Args: - filter (FileMetadataFilter | dict[str, Any] | None): Filter on file metadata filter with exact match + filter: Filter on file metadata filter with exact match Returns: - int: Count of files matching the filter. + Count of files matching the filter. 
Examples: @@ -316,9 +316,9 @@ async def delete( """`Delete files `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): str or list of str - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: str or list of str + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -361,11 +361,11 @@ async def update( Currently, a full replacement of labels on a file is not supported (only partial add/remove updates). See the example below on how to perform partial labels update. Args: - item (FileMetadata | FileMetadataWrite | FileMetadataUpdate | Sequence[FileMetadata | FileMetadataWrite | FileMetadataUpdate]): file(s) to update. - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (FilesMetadata or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: file(s) to update. + mode: How to update data when a non-update object is given (FilesMetadata or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - FileMetadata | FileMetadataList: The updated files. + The updated files. 
Examples: @@ -415,12 +415,12 @@ async def search( Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. Args: - name (str | None): Prefix and fuzzy search on name. - filter (FileMetadataFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. - limit (int): Max number of results to return. + name: Prefix and fuzzy search on name. + filter: Filter to apply. Performs exact match on these fields. + limit: Max number of results to return. Returns: - FileMetadataList: List of requested files metadata. + List of requested files metadata. Examples: @@ -447,11 +447,11 @@ async def upload_content( """`Upload a file content `_ Args: - path (Path | str): Path to the file you wish to upload. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - instance_id (NodeId | None): Instance ID of the file. + path: Path to the file you wish to upload. + external_id: The external ID provided by the client. Must be unique within the project. + instance_id: Instance ID of the file. Returns: - FileMetadata: No description. + No description. """ path = Path(path) if path.is_file(): @@ -483,25 +483,25 @@ async def upload( """`Upload a file `_ Args: - path (Path | str): Path to the file you wish to upload. If path is a directory, this method will upload all files in that directory. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - name (str | None): Name of the file. - source (str | None): The source of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf, ... - metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value. - directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path. 
- asset_ids (Sequence[int] | None): No description. - source_created_time (int | None): The timestamp for when the file was originally created in the source system. - source_modified_time (int | None): The timestamp for when the file was last modified in the source system. - data_set_id (int | None): ID of the data set. - labels (Sequence[Label] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the file. - security_categories (Sequence[int] | None): Security categories to attach to this file. - recursive (bool): If path is a directory, upload all contained files recursively. - overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + path: Path to the file you wish to upload. If path is a directory, this method will upload all files in that directory. + external_id: The external ID provided by the client. Must be unique within the project. + name: Name of the file. + source: The source of the file. + mime_type: File type. E.g. text/plain, application/pdf, ... + metadata: Customizable extra data about the file. String key -> String value. + directory: The directory to be associated with this file. Must be an absolute, unix-style path. + asset_ids: No description. + source_created_time: The timestamp for when the file was originally created in the source system. 
+ source_modified_time: The timestamp for when the file was last modified in the source system. + data_set_id: ID of the data set. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the file. + security_categories: Security categories to attach to this file. + recursive: If path is a directory, upload all contained files recursively. + overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. Returns: - FileMetadata | FileMetadataList: The file metadata of the uploaded file(s). + The file metadata of the uploaded file(s). Examples: @@ -594,12 +594,12 @@ async def upload_content_bytes( Note that the maximum file size is 5GiB. In order to upload larger files use `multipart_upload_content_session`. Args: - content (str | bytes | BinaryIO): The content to upload. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - instance_id (NodeId | None): Instance ID of the file. + content: The content to upload. + external_id: The external ID provided by the client. Must be unique within the project. + instance_id: Instance ID of the file. Returns: - FileMetadata: No description. + No description. Examples: @@ -693,24 +693,24 @@ async def upload_bytes( Note that the maximum file size is 5GiB. In order to upload larger files use `multipart_upload_session`. 
Args: - content (str | bytes | BinaryIO | AsyncIterator[bytes]): The content to upload. - name (str): Name of the file. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - source (str | None): The source of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf,... - metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value. - directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path. - asset_ids (Sequence[int] | None): No description. - data_set_id (int | None): Id of the data set. - labels (Sequence[Label] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the file. - source_created_time (int | None): The timestamp for when the file was originally created in the source system. - source_modified_time (int | None): The timestamp for when the file was last modified in the source system. - security_categories (Sequence[int] | None): Security categories to attach to this file. - overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + content: The content to upload. + name: Name of the file. + external_id: The external ID provided by the client. Must be unique within the project. + source: The source of the file. + mime_type: File type. E.g. text/plain, application/pdf,... 
+ metadata: Customizable extra data about the file. String key -> String value. + directory: The directory to be associated with this file. Must be an absolute, unix-style path. + asset_ids: No description. + data_set_id: Id of the data set. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the file. + source_created_time: The timestamp for when the file was originally created in the source system. + source_modified_time: The timestamp for when the file was last modified in the source system. + security_categories: Security categories to attach to this file. + overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. Returns: - FileMetadata: The metadata of the uploaded file. + The metadata of the uploaded file. Examples: @@ -787,24 +787,24 @@ async def multipart_upload_session( for each part before exiting. It also supports async usage with `async with`, then calling `await upload_part_async`. Args: - name (str): Name of the file. - parts (int): The number of parts to upload, must be between 1 and 250. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - source (str | None): The source of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf,... - metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value. 
- directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path. - asset_ids (Sequence[int] | None): No description. - data_set_id (int | None): Id of the data set. - labels (Sequence[Label] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the file. - source_created_time (int | None): The timestamp for when the file was originally created in the source system. - source_modified_time (int | None): The timestamp for when the file was last modified in the source system. - security_categories (Sequence[int] | None): Security categories to attach to this file. - overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + name: Name of the file. + parts: The number of parts to upload, must be between 1 and 250. + external_id: The external ID provided by the client. Must be unique within the project. + source: The source of the file. + mime_type: File type. E.g. text/plain, application/pdf,... + metadata: Customizable extra data about the file. String key -> String value. + directory: The directory to be associated with this file. Must be an absolute, unix-style path. + asset_ids: No description. + data_set_id: Id of the data set. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the file. 
+ source_created_time: The timestamp for when the file was originally created in the source system. + source_modified_time: The timestamp for when the file was last modified in the source system. + security_categories: Security categories to attach to this file. + overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. Returns: - FileMultipartUploadSession: Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded. + Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded. Examples: @@ -878,12 +878,12 @@ async def multipart_upload_content_session( for each part before exiting. It also supports async usage with `async with`, then calling `await upload_part_async`. Args: - parts (int): The number of parts to upload, must be between 1 and 250. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - instance_id (NodeId | None): Instance ID of the file. + parts: The number of parts to upload, must be between 1 and 250. + external_id: The external ID provided by the client. Must be unique within the project. + instance_id: Instance ID of the file. 
Returns: - FileMultipartUploadSession: Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded. + Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded. Examples: @@ -931,8 +931,8 @@ async def _upload_multipart_part(self, upload_url: str, content: str | bytes | B If `content` does not somehow expose its length, this method may not work on Azure or AWS. Args: - upload_url (str): URL to upload file chunk to. - content (str | bytes | BinaryIO): The content to upload. + upload_url: URL to upload file chunk to. + content: The content to upload. """ headers = {"accept": "*/*"} file_size, file_content = prepare_content_for_upload(content) @@ -954,7 +954,7 @@ async def _complete_multipart_upload(self, session: FileMultipartUploadSession) """Complete a multipart upload. Once this returns the file can be downloaded. Args: - session (FileMultipartUploadSession): Multipart upload session returned from + session: Multipart upload session returned from """ await self._post( self._RESOURCE_PATH + "/completemultipartupload", @@ -972,13 +972,13 @@ async def retrieve_download_urls( """Get download links by id or external id Args: - id (int | Sequence[int] | None): Id or list of ids. - external_id (str | SequenceNotStr[str] | None): External id or list of external ids. - instance_id (NodeId | Sequence[NodeId] | None): Instance id or list of instance ids. - extended_expiration (bool): Extend expiration time of download url to 1 hour. Defaults to false. + id: Id or list of ids. + external_id: External id or list of external ids. + instance_id: Instance id or list of instance ids. + extended_expiration: Extend expiration time of download url to 1 hour. Defaults to false. 
Returns: - dict[int | str | NodeId, str]: Dictionary containing download urls. + Dictionary containing download urls. """ identifiers = IdentifierSequence.load(ids=id, external_ids=external_id, instance_ids=instance_id) @@ -1052,13 +1052,12 @@ async def download( the files missing. A warning is issued when this happens, listing the affected files. Args: - directory (str | Path): Directory to download the file(s) to. - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids. - instance_id (NodeId | Sequence[NodeId] | None): Instance ID or list of instance ids. - keep_directory_structure (bool): Whether or not to keep the directory hierarchy in CDF, - creating subdirectories as needed below the given directory. - resolve_duplicate_file_names (bool): Whether or not to resolve duplicate file names by appending a number on duplicate file names + directory: Directory to download the file(s) to. + id: Id or list of ids + external_id: External ID or list of external ids. + instance_id: Instance ID or list of instance ids. + keep_directory_structure: Whether or not to keep the directory hierarchy in CDF, creating subdirectories as needed below the given directory. + resolve_duplicate_file_names: Whether or not to resolve duplicate file names by appending a number on duplicate file names Examples: @@ -1196,10 +1195,10 @@ async def download_to_path( """Download a file to a specific target. Args: - path (Path | str): Download to this path. - id (int | None): Id of of the file to download. - external_id (str | None): External id of the file to download. - instance_id (NodeId | None): Instance id of the file to download. + path: Download to this path. + id: Id of the file to download. + external_id: External id of the file to download. + instance_id: Instance id of the file to download. Examples: @@ -1223,9 +1222,9 @@ async def download_bytes( """Download a file as bytes.
Args: - id (int | None): Id of the file - external_id (str | None): External id of the file - instance_id (NodeId | None): Instance id of the file + id: Id of the file + external_id: External id of the file + instance_id: Instance id of the file Examples: @@ -1237,7 +1236,7 @@ async def download_bytes( >>> file_content = client.files.download_bytes(id=1) Returns: - bytes: The file in binary format + The file in binary format """ identifier = Identifier.of_either(id, external_id, instance_id).as_dict() download_link = await self._get_download_link(identifier) @@ -1281,31 +1280,31 @@ async def list( """`List files `_ Args: - name (str | None): Name of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf, .. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value - asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs. - asset_external_ids (SequenceNotStr[str] | None): No description. - asset_subtree_ids (int | Sequence[int] | None): Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only files in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only files in the specified data set(s) with this external id / these external ids. - labels (LabelFilter | None): Return only the files matching the specified label filter(s). - geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. 
- source (str | None): The source of this event. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range. - source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range. - uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - external_id_prefix (str | None): External Id provided by client. Should be unique within the project. - directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client. - uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. - limit (int | None): Max number of files to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + name: Name of the file. + mime_type: File type. E.g. text/plain, application/pdf, .. + metadata: Custom, application specific metadata. String key -> String value + asset_ids: Only include files that reference these specific asset IDs. + asset_external_ids: No description. + asset_subtree_ids: Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. 
+ asset_subtree_external_ids: Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only files in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only files in the specified data set(s) with this external id / these external ids. + labels: Return only the files matching the specified label filter(s). + geo_location: Only include files matching the specified geographic relation. + source: The source of this event. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + source_created_time: Filter for files where the sourceCreatedTime field has been set and is within the specified range. + source_modified_time: Filter for files where the sourceModifiedTime field has been set and is within the specified range. + uploaded_time: Range between two timestamps + external_id_prefix: External Id provided by client. Should be unique within the project. + directory_prefix: Filter by this (case-sensitive) prefix for the directory provided by the client. + uploaded: Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. + limit: Max number of files to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). Returns: - FileMetadataList: The requested files. + The requested files. 
Examples: diff --git a/cognite/client/_api/functions/__init__.py b/cognite/client/_api/functions/__init__.py index 54b4d9bdf6..9e1c46a9e7 100644 --- a/cognite/client/_api/functions/__init__.py +++ b/cognite/client/_api/functions/__init__.py @@ -115,18 +115,18 @@ async def __call__( """Iterate over functions. Args: - chunk_size (int | None): Number of functions to yield per chunk. Defaults to yielding functions one by one. - name (str | None): The name of the function. - owner (str | None): Owner of the function. - file_id (int | None): The file ID of the zip-file used to create the function. - status (FunctionStatus | None): Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"]. - external_id_prefix (str | None): External ID prefix to filter on. - created_time (dict[Literal['min', 'max'], int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - metadata (dict[str, str] | None): No description. - limit (int | None): Maximum number of functions to return. Defaults to yielding all functions. + chunk_size: Number of functions to yield per chunk. Defaults to yielding functions one by one. + name: The name of the function. + owner: Owner of the function. + file_id: The file ID of the zip-file used to create the function. + status: Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"]. + external_id_prefix: External ID prefix to filter on. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + metadata: No description. + limit: Maximum number of functions to return. Defaults to yielding all functions. Yields: - Function | FunctionList: An iterator over functions. + An iterator over functions. 
""" # noqa: DOC404 # The _list_generator method is not used as the /list endpoint does not # respond with a cursor (pagination is not supported) @@ -185,28 +185,27 @@ async def create( For help with troubleshooting, please see `this page. `_ Args: - name (str | FunctionWrite): The name of the function or a FunctionWrite object. If a FunctionWrite - object is passed, all other arguments are ignored. - folder (str | None): Path to the folder where the function source code is located. - file_id (int | None): File ID of the code uploaded to the Files API. - function_path (str): Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on POSIX path format. - function_handle (FunctionHandle | None): Reference to a function object, which must be named `handle`. - external_id (str | None): External id of the function. - description (str | None): Description of the function. - owner (str | None): Owner of this function. Typically used to know who created it. - secrets (dict[str, str] | None): Additional secrets as key/value pairs. These can e.g. password to simulators or other data sources. Keys must be lowercase characters, numbers or dashes (-) and at most 15 characters. You can create at most 30 secrets, all keys must be unique. - env_vars (dict[str, str] | None): Environment variables as key/value pairs. Keys can contain only letters, numbers or the underscore character. You can create at most 100 environment variables. - cpu (float | None): Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. - memory (float | None): Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. - runtime (RunTime | None): The function runtime. 
Valid values are ["py310", "py311", "py312", `None`], and `None` translates to the API default which will change over time. The runtime "py312" resolves to the latest version of the Python 3.12 series. - metadata (dict[str, str] | None): Metadata for the function as key/value pairs. Key & values can be at most 32, 512 characters long respectively. You can have at the most 16 key-value pairs, with a maximum size of 512 bytes. - index_url (str | None): Index URL for Python Package Manager to use. Be aware of the intrinsic security implications of using the `index_url` option. `More information can be found on official docs, `_ - extra_index_urls (list[str] | None): Extra Index URLs for Python Package Manager to use. Be aware of the intrinsic security implications of using the `extra_index_urls` option. `More information can be found on official docs, `_ - skip_folder_validation (bool): When creating a function using the 'folder' argument, pass True to skip the extra validation step that attempts to import the module. Skipping can be useful when your function requires several heavy packages to already be installed locally. Defaults to False. - data_set_id (int | None): Data set to upload the function code to. Note: Does not affect the function itself. + name: The name of the function or a FunctionWrite object. If a FunctionWrite object is passed, all other arguments are ignored. + folder: Path to the folder where the function source code is located. + file_id: File ID of the code uploaded to the Files API. + function_path: Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on POSIX path format. + function_handle: Reference to a function object, which must be named `handle`. + external_id: External id of the function. + description: Description of the function. + owner: Owner of this function. Typically used to know who created it. + secrets: Additional secrets as key/value pairs. These can e.g. 
password to simulators or other data sources. Keys must be lowercase characters, numbers or dashes (-) and at most 15 characters. You can create at most 30 secrets, all keys must be unique. + env_vars: Environment variables as key/value pairs. Keys can contain only letters, numbers or the underscore character. You can create at most 100 environment variables. + cpu: Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + memory: Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + runtime: The function runtime. Valid values are ["py310", "py311", "py312", `None`], and `None` translates to the API default which will change over time. The runtime "py312" resolves to the latest version of the Python 3.12 series. + metadata: Metadata for the function as key/value pairs. Key & values can be at most 32, 512 characters long respectively. You can have at the most 16 key-value pairs, with a maximum size of 512 bytes. + index_url: Index URL for Python Package Manager to use. Be aware of the intrinsic security implications of using the `index_url` option. `More information can be found on official docs, `_ + extra_index_urls: Extra Index URLs for Python Package Manager to use. Be aware of the intrinsic security implications of using the `extra_index_urls` option. `More information can be found on official docs, `_ + skip_folder_validation: When creating a function using the 'folder' argument, pass True to skip the extra validation step that attempts to import the module. Skipping can be useful when your function requires several heavy packages to already be installed locally. Defaults to False. + data_set_id: Data set to upload the function code to. Note: Does not affect the function itself. 
Returns: - Function: The created function. + The created function. Examples: @@ -345,8 +344,8 @@ async def delete( """`Delete one or more functions. `_ Args: - id (int | Sequence[int] | None): Id or list of ids. - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids. + id: Id or list of ids. + external_id: External ID or list of external ids. Example: @@ -376,17 +375,17 @@ async def list( """`List all functions. `_ Args: - name (str | None): The name of the function. - owner (str | None): Owner of the function. - file_id (int | None): The file ID of the zip-file used to create the function. - status (FunctionStatus | None): Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"]. - external_id_prefix (str | None): External ID prefix to filter on. - created_time (dict[Literal['min', 'max'], int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32, value 512 characters, up to 16 key-value pairs. Maximum size of entire metadata is 4096 bytes. - limit (int | None): Maximum number of functions to return. Pass in -1, float('inf') or None to list all. + name: The name of the function. + owner: Owner of the function. + file_id: The file ID of the zip-file used to create the function. + status: Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"]. + external_id_prefix: External ID prefix to filter on. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32, value 512 characters, up to 16 key-value pairs. Maximum size of entire metadata is 4096 bytes. 
+ limit: Maximum number of functions to return. Pass in -1, float('inf') or None to list all. Returns: - FunctionList: List of functions + List of functions Example: @@ -425,11 +424,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None) """`Retrieve a single function by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - Function | None: Requested function or None if it does not exist. + Requested function or None if it does not exist. Examples: @@ -456,12 +455,12 @@ async def retrieve_multiple( """`Retrieve multiple functions by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - FunctionList: The requested functions. + The requested functions. Examples: @@ -496,17 +495,17 @@ async def call( """`Call a function by its ID or external ID. `_. Args: - id (int | None): ID - external_id (str | None): External ID - data (dict[str, object] | None): Input data to the function (JSON serializable). This data is passed deserialized into the function through one of the arguments called data. **WARNING:** Secrets or other confidential information should not be passed via this argument. There is a dedicated `secrets` argument in FunctionsAPI.create() for this purpose.' - wait (bool): Wait until the function call is finished. Defaults to True. - nonce (str | None): Nonce retrieved from sessions API when creating a session. This will be used to bind the session before executing the function. If not provided, a new session will be created based on the client credentials. + id: ID + external_id: External ID + data: Input data to the function (JSON serializable). 
This data is passed deserialized into the function through one of the arguments called data. **WARNING:** Secrets or other confidential information should not be passed via this argument. There is a dedicated `secrets` argument in FunctionsAPI.create() for this purpose.' + wait: Wait until the function call is finished. Defaults to True. + nonce: Nonce retrieved from sessions API when creating a session. This will be used to bind the session before executing the function. If not provided, a new session will be created based on the client credentials. Tip: You can create a session via the Sessions API, using the client.iam.session.create() method. Returns: - FunctionCall: A function call object. + A function call object. Examples: @@ -540,7 +539,7 @@ async def limits(self) -> FunctionsLimits: """`Get service limits. `_. Returns: - FunctionsLimits: A function limits object. + A function limits object. Examples: @@ -652,7 +651,7 @@ async def activate(self) -> FunctionsStatus: May take some time to take effect (hours). Returns: - FunctionsStatus: A function activation status. + A function activation status. Examples: @@ -670,7 +669,7 @@ async def status(self) -> FunctionsStatus: """`Functions activation status for the Project. `_. Returns: - FunctionsStatus: A function activation status. + A function activation status. Examples: @@ -694,11 +693,10 @@ def get_handle_function_node(file_content: str) -> ast.FunctionDef | ast.Assign and assignments since Cognite Functions require directly callable entry points. Args: - file_content (str): The Python source code as a string + file_content: The Python source code as a string Returns: - ast.FunctionDef | ast.Assign | ast.AnnAssign | None: The AST node of the last top-level 'handle' function, - assignment, or None if not found or if the file is not a valid Python file. + The AST node of the last top-level 'handle' function, assignment, or None if not found or if the file is not a valid Python file. 
""" try: tree = ast.parse(file_content) @@ -824,10 +822,10 @@ def _extract_requirements_from_file(file_name: str) -> list[str]: """Extracts a list of library requirements from a file. Comments, lines starting with '#', are ignored. Args: - file_name (str): name of the file to parse + file_name: name of the file to parse Returns: - list[str]: returns a list of library records + returns a list of library records """ requirements: list[str] = [] with open(file_name, "r+") as f: @@ -842,10 +840,10 @@ def _extract_requirements_from_doc_string(docstr: str) -> list[str] | None: """Extracts a list of library requirements defined between [requirements] and [/requirements] in a functions docstring. Args: - docstr (str): the docstring to extract requirements from + docstr: the docstring to extract requirements from Returns: - list[str] | None: returns a list of library records if requirements are defined in the docstring, else None + returns a list of library records if requirements are defined in the docstring, else None """ substr_start, substr_end = None, None @@ -867,11 +865,11 @@ def _validate_and_parse_requirements(requirements: list[str]) -> list[str]: """Validates the requirement specifications Args: - requirements (list[str]): list of requirement specifications + requirements: list of requirement specifications Raises: ValueError: if validation of requirements fails Returns: - list[str]: The parsed requirements + The parsed requirements """ constructors = local_import("pip._internal.req.constructors") install_req_from_line = constructors.install_req_from_line @@ -890,10 +888,10 @@ def _get_fn_docstring_requirements(fn: Callable) -> list[str]: """Read requirements from a function docstring, validate them and return. Args: - fn (Callable): the function to read requirements from + fn: the function to read requirements from Returns: - list[str]: A (possibly empty) list of requirements. + A (possibly empty) list of requirements. 
""" if docstr := getdoc(fn): if reqs := _extract_requirements_from_doc_string(docstr): diff --git a/cognite/client/_api/functions/calls.py b/cognite/client/_api/functions/calls.py index ea86ac2dc5..30f53c556b 100644 --- a/cognite/client/_api/functions/calls.py +++ b/cognite/client/_api/functions/calls.py @@ -30,16 +30,16 @@ async def list( """`List all calls associated with a specific function id. `_ Either function_id or function_external_id must be specified. Args: - function_id (int | None): ID of the function on which the calls were made. - function_external_id (str | None): External ID of the function on which the calls were made. - status (str | None): Status of the call. Possible values ["Running", "Failed", "Completed", "Timeout"]. - schedule_id (int | None): Schedule id from which the call belongs (if any). - start_time (dict[str, int] | None): Start time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. - end_time (dict[str, int] | None): End time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. - limit (int | None): Maximum number of function calls to list. Pass in -1, float('inf') or None to list all Function Calls. + function_id: ID of the function on which the calls were made. + function_external_id: External ID of the function on which the calls were made. + status: Status of the call. Possible values ["Running", "Failed", "Completed", "Timeout"]. + schedule_id: Schedule id from which the call belongs (if any). + start_time: Start time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. + end_time: End time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit: Maximum number of function calls to list. Pass in -1, float('inf') or None to list all Function Calls. 
Returns: - FunctionCallList: List of function calls + List of function calls Examples: @@ -83,12 +83,12 @@ async def retrieve( """`Retrieve a single function call by id. `_ Args: - call_id (int): ID of the call. - function_id (int | None): ID of the function on which the call was made. - function_external_id (str | None): External ID of the function on which the call was made. + call_id: ID of the call. + function_id: ID of the function on which the call was made. + function_external_id: External ID of the function on which the call was made. Returns: - FunctionCall | None: Requested function call or None if either call ID or function identifier is not found. + Requested function call or None if either call ID or function identifier is not found. Examples: @@ -125,12 +125,12 @@ async def get_response( """`Retrieve the response from a function call. `_ Args: - call_id (int): ID of the call. - function_id (int | None): ID of the function on which the call was made. - function_external_id (str | None): External ID of the function on which the call was made. + call_id: ID of the call. + function_id: ID of the function on which the call was made. + function_external_id: External ID of the function on which the call was made. Returns: - dict[str, object] | None: Response from the function call. + Response from the function call. Examples: @@ -163,12 +163,12 @@ async def get_logs( """`Retrieve logs for function call. `_ Args: - call_id (int): ID of the call. - function_id (int | None): ID of the function on which the call was made. - function_external_id (str | None): External ID of the function on which the call was made. + call_id: ID of the call. + function_id: ID of the function on which the call was made. + function_external_id: External ID of the function on which the call was made. Returns: - FunctionCallLog: Log for the function call. + Log for the function call. 
Examples: diff --git a/cognite/client/_api/functions/schedules.py b/cognite/client/_api/functions/schedules.py index 71cd7b9437..a7970259ea 100644 --- a/cognite/client/_api/functions/schedules.py +++ b/cognite/client/_api/functions/schedules.py @@ -64,16 +64,16 @@ async def __call__( """Iterate over function schedules Args: - chunk_size (int | None): The number of schedules to return in each chunk. Defaults to yielding one schedule a time. - name (str | None): Name of the function schedule. - function_id (int | None): ID of the function the schedules are linked to. - function_external_id (str | None): External ID of the function the schedules are linked to. - created_time (dict[str, int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - cron_expression (str | None): Cron expression. - limit (int | None): Maximum schedules to return. Defaults to return all schedules. + chunk_size: The number of schedules to return in each chunk. Defaults to yielding one schedule at a time. + name: Name of the function schedule. + function_id: ID of the function the schedules are linked to. + function_external_id: External ID of the function the schedules are linked to. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + cron_expression: Cron expression. + limit: Maximum schedules to return. Defaults to returning all schedules. Yields: - FunctionSchedule | FunctionSchedulesList: Function schedules. + Function schedules. """ # noqa: DOC404 _ensure_at_most_one_id_given(function_id, function_external_id) @@ -104,11 +104,11 @@ async def retrieve( """`Retrieve a single function schedule by id. `_ Args: - id (int | Sequence[int]): Schedule ID - ignore_unknown_ids (bool): Ignore IDs that are not found rather than throw an exception. + id: Schedule ID + ignore_unknown_ids: Ignore IDs that are not found rather than throw an exception. 
Returns: - FunctionSchedule | None | FunctionSchedulesList: Requested function schedule or None if not found. + Requested function schedule or None if not found. Examples: @@ -140,15 +140,15 @@ async def list( """`List all schedules associated with a specific project. `_ Args: - name (str | None): Name of the function schedule. - function_id (int | None): ID of the function the schedules are linked to. - function_external_id (str | None): External ID of the function the schedules are linked to. - created_time (dict[str, int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - cron_expression (str | None): Cron expression. - limit (int | None): Maximum number of schedules to list. Pass in -1, float('inf') or None to list all. + name: Name of the function schedule. + function_id: ID of the function the schedules are linked to. + function_external_id: External ID of the function the schedules are linked to. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + cron_expression: Cron expression. + limit: Maximum number of schedules to list. Pass in -1, float('inf') or None to list all. Returns: - FunctionSchedulesList: List of function schedules + List of function schedules Examples: @@ -198,18 +198,16 @@ async def create( """`Create a schedule associated with a specific project. `_ Args: - name (str | FunctionScheduleWrite): Name of the schedule or FunctionSchedule object. If a function schedule object is passed, the other arguments are ignored except for the client_credentials argument. - cron_expression (str | None): Cron expression. - function_id (int | None): Id of the function to attach the schedule to. - function_external_id (str | None): (DEPRECATED) External id of the function to attach the schedule to. - Note: Will be automatically converted to (internal) ID, as schedules must be bound to an ID. 
- client_credentials (dict[str, str] | ClientCredentials | None): Instance of ClientCredentials - or a dictionary containing client credentials: 'client_id' and 'client_secret'. - description (str | None): Description of the schedule. - data (dict[str, object] | None): Data to be passed to the scheduled run. + name: Name of the schedule or FunctionSchedule object. If a function schedule object is passed, the other arguments are ignored except for the client_credentials argument. + cron_expression: Cron expression. + function_id: Id of the function to attach the schedule to. + function_external_id: (DEPRECATED) External id of the function to attach the schedule to. Note: Will be automatically converted to (internal) ID, as schedules must be bound to an ID. + client_credentials: Instance of ClientCredentials or a dictionary containing client credentials: 'client_id' and 'client_secret'. + description: Description of the schedule. + data: Data to be passed to the scheduled run. Returns: - FunctionSchedule: Created function schedule. + Created function schedule. Note: There are several ways to authenticate the function schedule — the order of priority is as follows: @@ -315,7 +313,7 @@ async def delete(self, id: int) -> None: """`Delete a schedule associated with a specific project. `_ Args: - id (int): Id of the schedule + id: Id of the schedule Examples: @@ -334,10 +332,10 @@ async def get_input_data(self, id: int) -> dict[str, object] | None: """`Retrieve the input data to the associated function. `_ Args: - id (int): Id of the schedule + id: Id of the schedule Returns: - dict[str, object] | None: Input data to the associated function or None if not set. This data is passed deserialized into the function through the data argument. + Input data to the associated function or None if not set. This data is passed deserialized into the function through the data argument. 
Examples: diff --git a/cognite/client/_api/geospatial.py b/cognite/client/_api/geospatial.py index b3ea42756a..784e0b909b 100644 --- a/cognite/client/_api/geospatial.py +++ b/cognite/client/_api/geospatial.py @@ -64,10 +64,10 @@ async def create_feature_types( Args: - feature_type (FeatureType | FeatureTypeWrite | Sequence[FeatureType] | Sequence[FeatureTypeWrite]): feature type definition or list of feature type definitions to create. + feature_type: feature type definition or list of feature type definitions to create. Returns: - FeatureType | FeatureTypeList: Created feature type definition(s) + Created feature type definition(s) Examples: @@ -100,8 +100,8 @@ async def delete_feature_types(self, external_id: str | SequenceNotStr[str], rec Args: - external_id (str | SequenceNotStr[str]): External ID or list of external ids - recursive (bool): if `true` the features will also be dropped + external_id: External ID or list of external ids + recursive: if `true` the features will also be dropped Examples: @@ -125,7 +125,7 @@ async def list_feature_types(self) -> FeatureTypeList: Returns: - FeatureTypeList: List of feature types + List of feature types Examples: @@ -155,10 +155,10 @@ async def retrieve_feature_types(self, external_id: str | list[str]) -> FeatureT Args: - external_id (str | list[str]): External ID + external_id: External ID Returns: - FeatureType | FeatureTypeList: Requested Type or None if it does not exist. + Requested Type or None if it does not exist. Examples: @@ -182,10 +182,10 @@ async def patch_feature_types(self, patch: FeatureTypePatch | Sequence[FeatureTy Args: - patch (FeatureTypePatch | Sequence[FeatureTypePatch]): the patch to apply + patch: the patch to apply Returns: - FeatureTypeList: The patched feature types. + The patched feature types. Examples: @@ -263,13 +263,13 @@ async def create_features( Args: - feature_type_external_id (str): Feature type definition for the features to create. 
- feature (Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite] | FeatureList | FeatureWriteList): one feature or a list of features to create or a FeatureList object - allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. - chunk_size (int | None): maximum number of items in a single request to the api + feature_type_external_id: Feature type definition for the features to create. + feature: one feature or a list of features to create or a FeatureList object + allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + chunk_size: maximum number of items in a single request to the api Returns: - Feature | FeatureList: Created features + Created features Examples: @@ -322,8 +322,8 @@ async def delete_features( Args: - feature_type_external_id (str): No description. - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + feature_type_external_id: No description. + external_id: External ID or list of external ids Examples: @@ -368,12 +368,12 @@ async def retrieve_features( Args: - feature_type_external_id (str): No description. - external_id (str | list[str]): External ID or list of external ids - properties (dict[str, Any] | None): the output property selection + feature_type_external_id: No description. 
+ external_id: External ID or list of external ids + properties: the output property selection Returns: - FeatureList | Feature: Requested features or None if it does not exist. + Requested features or None if it does not exist. Examples: @@ -426,13 +426,13 @@ async def update_features( Args: - feature_type_external_id (str): No description. - feature (Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite]): feature or list of features. - allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. - chunk_size (int | None): maximum number of items in a single request to the api + feature_type_external_id: No description. + feature: feature or list of features. + allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + chunk_size: maximum number of items in a single request to the api Returns: - Feature | FeatureList: Updated features + Updated features Examples: @@ -481,14 +481,14 @@ async def list_features( This method allows to filter all features. Args: - feature_type_external_id (str): the feature type to list features for - filter (dict[str, Any] | None): the list filter - properties (dict[str, Any] | None): the output property selection - limit (int | None): Maximum number of features to return. Defaults to 25. Set to -1, float("inf") or None to return all features. 
- allow_crs_transformation (bool): If true, then input geometries if existing in the filter will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + feature_type_external_id: the feature type to list features for + filter: the list filter + properties: the output property selection + limit: Maximum number of features to return. Defaults to 25. Set to -1, float("inf") or None to return all features. + allow_crs_transformation: If true, then input geometries if existing in the filter will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. Returns: - FeatureList: The filtered features + The filtered features Examples: @@ -564,16 +564,16 @@ async def search_features( If you need to return more than 1000 items, use the `stream_features(...)` method instead. Args: - feature_type_external_id (str): The feature type to search for - filter (dict[str, Any] | None): The search filter - properties (dict[str, Any] | None): The output property selection - limit (int): Maximum number of results - order_by (Sequence[OrderSpec] | None): The order specification - allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. 
- allow_dimensionality_mismatch (bool): Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False. + feature_type_external_id: The feature type to search for + filter: The search filter + properties: The output property selection + limit: Maximum number of results + order_by: The order specification + allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + allow_dimensionality_mismatch: Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False. Returns: - FeatureList: the filtered features + the filtered features Examples: @@ -692,14 +692,14 @@ async def stream_features( If you need to order the results, use the `search_features(...)` method instead. Args: - feature_type_external_id (str): the feature type to search for - filter (dict[str, Any] | None): the search filter - properties (dict[str, Any] | None): the output property selection - allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. - allow_dimensionality_mismatch (bool): Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False. 
+ feature_type_external_id: the feature type to search for + filter: the search filter + properties: the output property selection + allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + allow_dimensionality_mismatch: Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False. Yields: - Feature: a generator for the filtered features + a generator for the filtered features Examples: @@ -753,14 +753,14 @@ async def aggregate_features( Args: - feature_type_external_id (str): the feature type to filter features from - filter (dict[str, Any] | None): the search filter - group_by (SequenceNotStr[str] | None): list of properties to group by with - order_by (Sequence[OrderSpec] | None): the order specification - output (dict[str, Any] | None): the aggregate output + feature_type_external_id: the feature type to filter features from + filter: the search filter + group_by: list of properties to group by with + order_by: the order specification + output: the aggregate output Returns: - FeatureAggregateList: the filtered features + the filtered features Examples: @@ -804,10 +804,10 @@ async def get_coordinate_reference_systems(self, srids: int | Sequence[int]) -> Args: - srids (int | Sequence[int]): (Union[int, Sequence[int]]): SRID or list of SRIDs + srids: SRID or list of SRIDs Returns: - CoordinateReferenceSystemList: Requested CRSs. + Requested CRSs. 
Examples: @@ -835,10 +835,10 @@ async def list_coordinate_reference_systems(self, only_custom: bool = False) -> Args: - only_custom (bool): list only custom CRSs or not + only_custom: list only custom CRSs or not Returns: - CoordinateReferenceSystemList: list of CRSs. + list of CRSs. Examples: @@ -867,10 +867,10 @@ async def create_coordinate_reference_systems( Args: - crs (CoordinateReferenceSystem | CoordinateReferenceSystemWrite | Sequence[CoordinateReferenceSystem] | Sequence[CoordinateReferenceSystemWrite]): a CoordinateReferenceSystem or a list of CoordinateReferenceSystem + crs: a CoordinateReferenceSystem or a list of CoordinateReferenceSystem Returns: - CoordinateReferenceSystemList: list of CRSs. + list of CRSs. Examples: @@ -934,7 +934,7 @@ async def delete_coordinate_reference_systems(self, srids: int | Sequence[int]) Args: - srids (int | Sequence[int]): (Union[int, Sequence[int]]): SRID or list of SRIDs + srids: SRID or list of SRIDs Examples: @@ -971,18 +971,18 @@ async def put_raster( """`Put raster ` Args: - feature_type_external_id (str): No description. - feature_external_id (str): one feature or a list of features to create - raster_property_name (str): the raster property name - raster_format (str): the raster input format - raster_srid (int): the associated SRID for the raster - file (str | Path): the path to the file of the raster - allow_crs_transformation (bool): When the parameter is false, requests with rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code. - raster_scale_x (float | None): the X component of the pixel width in units of coordinate reference system - raster_scale_y (float | None): the Y component of the pixel height in units of coordinate reference system + feature_type_external_id: No description. 
+ feature_external_id: one feature or a list of features to create + raster_property_name: the raster property name + raster_format: the raster input format + raster_srid: the associated SRID for the raster + file: the path to the file of the raster + allow_crs_transformation: When the parameter is false, requests with rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code. + raster_scale_x: the X component of the pixel width in units of coordinate reference system + raster_scale_y: the Y component of the pixel height in units of coordinate reference system Returns: - RasterMetadata: the raster metadata if it was ingested successfully + the raster metadata if it was ingested successfully Examples: @@ -1027,9 +1027,9 @@ async def delete_raster( """`Delete raster ` Args: - feature_type_external_id (str): No description. - feature_external_id (str): one feature or a list of features to create - raster_property_name (str): the raster property name + feature_type_external_id: No description. + feature_external_id: one feature or a list of features to create + raster_property_name: the raster property name Examples: @@ -1063,18 +1063,18 @@ async def get_raster( """`Get raster ` Args: - feature_type_external_id (str): Feature type definition for the features to create. 
- feature_external_id (str): one feature or a list of features to create - raster_property_name (str): the raster property name - raster_format (str): the raster output format - raster_options (dict[str, Any] | None): GDAL raster creation key-value options - raster_srid (int | None): the SRID for the output raster - raster_scale_x (float | None): the X component of the output pixel width in units of coordinate reference system - raster_scale_y (float | None): the Y component of the output pixel height in units of coordinate reference system - allow_crs_transformation (bool): When the parameter is false, requests with output rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code. + feature_type_external_id: Feature type definition for the features to create. + feature_external_id: one feature or a list of features to create + raster_property_name: the raster property name + raster_format: the raster output format + raster_options: GDAL raster creation key-value options + raster_srid: the SRID for the output raster + raster_scale_x: the X component of the output pixel width in units of coordinate reference system + raster_scale_y: the Y component of the output pixel height in units of coordinate reference system + allow_crs_transformation: When the parameter is false, requests with output rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code. Returns: - bytes: the raster data + the raster data Examples: @@ -1111,10 +1111,10 @@ async def compute( """`Compute ` Args: - output (dict[str, GeospatialComputeFunction]): No description. + output: No description. Returns: - GeospatialComputedResponse: Mapping of keys to computed items. + Mapping of keys to computed items. 
Examples: diff --git a/cognite/client/_api/hosted_extractors/destinations.py b/cognite/client/_api/hosted_extractors/destinations.py index 90ad499e61..238d422ca2 100644 --- a/cognite/client/_api/hosted_extractors/destinations.py +++ b/cognite/client/_api/hosted_extractors/destinations.py @@ -47,11 +47,11 @@ async def __call__( Fetches Destination as they are iterated over, so you keep a limited number of destinations in memory. Args: - chunk_size (int | None): Number of Destinations to return in each chunk. Defaults to yielding one Destination a time. - limit (int | None): Maximum number of Destination to return. Defaults to returning all items. + chunk_size: Number of Destinations to return in each chunk. Defaults to yielding one Destination a time. + limit: Maximum number of Destination to return. Defaults to returning all items. Yields: - Destination | DestinationList: yields Destination one by one if chunk_size is not specified, else DestinationList objects. + yields Destination one by one if chunk_size is not specified, else DestinationList objects. """ # noqa: DOC404 self._warning.warn() @@ -79,12 +79,11 @@ async def retrieve( """`Retrieve one or more destinations. `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found - + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found Returns: - Destination | DestinationList: Requested destinations + Requested destinations Examples: @@ -113,9 +112,9 @@ async def delete( """`Delete one or more destsinations `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found - force (bool): Delete any jobs associated with each item. 
+ external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found + force: Delete any jobs associated with each item. Examples: @@ -151,10 +150,10 @@ async def create(self, items: DestinationWrite | Sequence[DestinationWrite]) -> """`Create one or more destinations. `_ Args: - items (DestinationWrite | Sequence[DestinationWrite]): Destination(s) to create. + items: Destination(s) to create. Returns: - Destination | DestinationList: Created destination(s) + Created destination(s) Examples: @@ -198,11 +197,11 @@ async def update( """`Update one or more destinations. `_ Args: - items (DestinationWrite | DestinationUpdate | Sequence[DestinationWrite | DestinationUpdate]): Destination(s) to update. - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DestinationWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + items: Destination(s) to update. + mode: How to update data when a non-update object is given (DestinationWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. 
Returns: - Destination | DestinationList: Updated destination(s) + Updated destination(s) Examples: @@ -232,10 +231,10 @@ async def list( """`List destinations `_ Args: - limit (int | None): Maximum number of destinations to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of destinations to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - DestinationList: List of requested destinations + List of requested destinations Examples: diff --git a/cognite/client/_api/hosted_extractors/jobs.py b/cognite/client/_api/hosted_extractors/jobs.py index a21842d5e3..f2d7087170 100644 --- a/cognite/client/_api/hosted_extractors/jobs.py +++ b/cognite/client/_api/hosted_extractors/jobs.py @@ -51,11 +51,11 @@ async def __call__( Fetches jobs as they are iterated over, so you keep a limited number of jobs in memory. Args: - chunk_size (int | None): Number of jobs to return in each chunk. Defaults to yielding one job a time. - limit (int | None): Maximum number of jobs to return. Defaults to returning all items. + chunk_size: Number of jobs to return in each chunk. Defaults to yielding one job a time. + limit: Maximum number of jobs to return. Defaults to returning all items. Yields: - Job | JobList: yields Job one by one if chunk_size is not specified, else JobList objects. + yields Job one by one if chunk_size is not specified, else JobList objects. """ # noqa: DOC404 self._warning.warn() async for item in self._list_generator( @@ -80,11 +80,11 @@ async def retrieve( """`Retrieve one or more jobs. `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the job type. - ignore_unknown_ids (bool): Ignore external IDs that are not found + external_ids: The external ID provided by the client. Must be unique for the job type. 
+ ignore_unknown_ids: Ignore external IDs that are not found Returns: - Job | None | JobList: Requested jobs + Requested jobs Examples: @@ -115,8 +115,8 @@ async def delete( """`Delete one or more jobs `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found Examples: Delete jobs by external id: @@ -149,10 +149,10 @@ async def create(self, items: JobWrite | Sequence[JobWrite]) -> Job | JobList: """`Create one or more jobs. `_ Args: - items (JobWrite | Sequence[JobWrite]): Job(s) to create. + items: Job(s) to create. Returns: - Job | JobList: Created job(s) + Created job(s) Examples: @@ -196,11 +196,11 @@ async def update( """`Update one or more jobs. `_ Args: - items (JobWrite | JobUpdate | Sequence[JobWrite | JobUpdate]): Job(s) to update. - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (JobWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + items: Job(s) to update. + mode: How to update data when a non-update object is given (JobWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. 
Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Job | JobList: Updated job(s) + Updated job(s) Examples: @@ -230,10 +230,10 @@ async def list( """`List jobs `_ Args: - limit (int | None): Maximum number of jobs to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of jobs to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - JobList: List of requested jobs + List of requested jobs Examples: @@ -273,13 +273,13 @@ async def list_logs( """`List job logs. `_ Args: - job (str | None): Require returned logs to belong to the job given by this external ID. - source (str | None): Require returned logs to belong to the any job with source given by this external ID. - destination (str | None): Require returned logs to belong to the any job with destination given by this external ID. - limit (int | None): Maximum number of logs to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + job: Require returned logs to belong to the job given by this external ID. + source: Require returned logs to belong to any job with source given by this external ID. + destination: Require returned logs to belong to any job with destination given by this external ID. + limit: Maximum number of logs to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - JobLogsList: List of requested job logs + List of requested job logs Examples: @@ -319,13 +319,13 @@ async def list_metrics( """`List job metrics. `_ Args: - job (str | None): Require returned metrics to belong to the job given by this external ID. - source (str | None): Require returned metrics to belong to the any job with source given by this external ID.
- destination (str | None): Require returned metrics to belong to the any job with destination given by this external ID. - limit (int | None): Maximum number of metrics to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + job: Require returned metrics to belong to the job given by this external ID. + source: Require returned metrics to belong to any job with source given by this external ID. + destination: Require returned metrics to belong to any job with destination given by this external ID. + limit: Maximum number of metrics to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - JobMetricsList: List of requested job metrics + List of requested job metrics Examples: diff --git a/cognite/client/_api/hosted_extractors/mappings.py b/cognite/client/_api/hosted_extractors/mappings.py index 1d513b0af5..bd03895577 100644 --- a/cognite/client/_api/hosted_extractors/mappings.py +++ b/cognite/client/_api/hosted_extractors/mappings.py @@ -42,11 +42,11 @@ async def __call__( Fetches Mapping as they are iterated over, so you keep a limited number of mappings in memory. Args: - chunk_size (int | None): Number of Mappings to return in each chunk. Defaults to yielding one mapping at a time. - limit (int | None): Maximum number of mappings to return. Defaults to returning all items. + chunk_size: Number of Mappings to return in each chunk. Defaults to yielding one mapping at a time. + limit: Maximum number of mappings to return. Defaults to returning all items. Yields: - Mapping | MappingList: yields Mapping one by one if chunk_size is not specified, else MappingList objects. + yields Mapping one by one if chunk_size is not specified, else MappingList objects. """ # noqa: DOC404 self._warning.warn() @@ -72,12 +72,11 @@ async def retrieve( """`Retrieve one or more mappings. `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type.
- ignore_unknown_ids (bool): Ignore external IDs that are not found - + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found Returns: - Mapping | MappingList: Requested mappings + Requested mappings Examples: @@ -106,9 +105,9 @@ async def delete( """`Delete one or more mappings `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found - force (bool): Delete any jobs associated with each item. + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found + force: Delete any jobs associated with each item. Examples: @@ -143,10 +142,10 @@ async def create(self, items: MappingWrite | Sequence[MappingWrite]) -> Mapping """`Create one or more mappings. `_ Args: - items (MappingWrite | Sequence[MappingWrite]): Mapping(s) to create. + items: Mapping(s) to create. Returns: - Mapping | MappingList: Created mapping(s) + Created mapping(s) Examples: @@ -180,10 +179,10 @@ async def update( """`Update one or more mappings. `_ Args: - items (MappingWrite | MappingUpdate | Sequence[MappingWrite | MappingUpdate]): Mapping(s) to update. + items: Mapping(s) to update. Returns: - Mapping | MappingList: Updated mapping(s) + Updated mapping(s) Examples: @@ -212,10 +211,10 @@ async def list( """`List mappings `_ Args: - limit (int | None): Maximum number of mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items. 
Returns: - MappingList: List of requested mappings + List of requested mappings Examples: diff --git a/cognite/client/_api/hosted_extractors/sources.py b/cognite/client/_api/hosted_extractors/sources.py index 87cfd367b0..3bc6bdf23e 100644 --- a/cognite/client/_api/hosted_extractors/sources.py +++ b/cognite/client/_api/hosted_extractors/sources.py @@ -43,11 +43,11 @@ async def __call__( Fetches sources as they are iterated over, so you keep a limited number of sources in memory. Args: - chunk_size (int | None): Number of sources to return in each chunk. Defaults to yielding one source a time. - limit (int | None): Maximum number of sources to return. Defaults to returning all items. + chunk_size: Number of sources to return in each chunk. Defaults to yielding one source a time. + limit: Maximum number of sources to return. Defaults to returning all items. Yields: - Source | SourceList: yields Source one by one if chunk_size is not specified, else SourceList objects. + yields Source one by one if chunk_size is not specified, else SourceList objects. """ # noqa: DOC404 self._warning.warn() @@ -73,11 +73,11 @@ async def retrieve( """`Retrieve one or more sources. `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception. Returns: - Source | SourceList: Requested sources + Requested sources Examples: @@ -106,9 +106,9 @@ async def delete( """`Delete one or more sources `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. 
- force (bool): Delete any jobs associated with each item. + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception. + force: Delete any jobs associated with each item. Examples: Delete sources by id: @@ -142,10 +142,10 @@ async def create(self, items: SourceWrite | Sequence[SourceWrite]) -> Source | S """`Create one or more sources. `_ Args: - items (SourceWrite | Sequence[SourceWrite]): Source(s) to create. + items: Source(s) to create. Returns: - Source | SourceList: Created source(s) + Created source(s) Examples: @@ -189,11 +189,11 @@ async def update( """`Update one or more sources. `_ Args: - items (SourceWrite | SourceUpdate | Sequence[SourceWrite | SourceUpdate]): Source(s) to update. - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (SourceWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + items: Source(s) to update. + mode: How to update data when a non-update object is given (SourceWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. 
Returns: - Source | SourceList: Updated source(s) + Updated source(s) Examples: @@ -236,10 +236,10 @@ async def list( """`List sources `_ Args: - limit (int | None): Maximum number of sources to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of sources to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - SourceList: List of requested sources + List of requested sources Examples: diff --git a/cognite/client/_api/iam/__init__.py b/cognite/client/_api/iam/__init__.py index 067556b127..45e9c2349e 100644 --- a/cognite/client/_api/iam/__init__.py +++ b/cognite/client/_api/iam/__init__.py @@ -113,14 +113,12 @@ def compare_capabilities( ``client.iam.verify_capabilities`` instead. Args: - existing_capabilities (ComparableCapability): List of existing capabilities. - desired_capabilities (ComparableCapability): List of wanted capabilities to check against existing. - project (str | None): If a ProjectCapability or ProjectCapabilityList is passed, we need to know which CDF project - to pull capabilities from (existing might be from several). If project is not passed, and ProjectCapabilityList - is used, it will be inferred from the AsyncCogniteClient used to call retrieve it via token/inspect. + existing_capabilities: List of existing capabilities. + desired_capabilities: List of wanted capabilities to check against existing. + project: If a ProjectCapability or ProjectCapabilityList is passed, we need to know which CDF project to pull capabilities from (existing might be from several). If project is not passed, and ProjectCapabilityList is used, it will be inferred from the AsyncCogniteClient used to call retrieve it via token/inspect. Returns: - list[Capability]: A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc. + A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc. 
Examples: @@ -210,10 +208,10 @@ async def verify_capabilities(self, desired_capabilities: ComparableCapability) """Helper method to compare your current capabilities with a set of desired capabilities and return any missing. Args: - desired_capabilities (ComparableCapability): List of desired capabilities to check against existing. + desired_capabilities: List of desired capabilities to check against existing. Returns: - list[Capability]: A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc. + A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc. Examples: diff --git a/cognite/client/_api/iam/groups.py b/cognite/client/_api/iam/groups.py index 06b4aa77a4..ffd5bb8d23 100644 --- a/cognite/client/_api/iam/groups.py +++ b/cognite/client/_api/iam/groups.py @@ -49,10 +49,10 @@ async def list(self, all: bool = False) -> GroupList: """`List groups. `_ Args: - all (bool): Whether to get all groups, only available with the groups:list acl. + all: Whether to get all groups, only available with the groups:list acl. Returns: - GroupList: List of groups. + List of groups. Example: @@ -82,9 +82,9 @@ async def create(self, group: Group | GroupWrite | Sequence[Group] | Sequence[Gr """`Create one or more groups. `_ Args: - group (Group | GroupWrite | Sequence[Group] | Sequence[GroupWrite]): Group or list of groups to create. + group: Group or list of groups to create. Returns: - Group | GroupList: The created group(s). + The created group(s). Example: @@ -149,7 +149,7 @@ async def delete(self, id: int | Sequence[int]) -> None: """`Delete one or more groups. `_ Args: - id (int | Sequence[int]): ID or list of IDs of groups to delete. + id: ID or list of IDs of groups to delete. 
Example: diff --git a/cognite/client/_api/iam/security_categories.py b/cognite/client/_api/iam/security_categories.py index c2f9beb453..79c87af400 100644 --- a/cognite/client/_api/iam/security_categories.py +++ b/cognite/client/_api/iam/security_categories.py @@ -17,10 +17,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SecurityCategory """`List security categories. `_ Args: - limit (int | None): Max number of security categories to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Max number of security categories to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - SecurityCategoryList: List of security categories + List of security categories Example: @@ -51,10 +51,10 @@ async def create( """`Create one or more security categories. `_ Args: - security_category (SecurityCategory | SecurityCategoryWrite | Sequence[SecurityCategory] | Sequence[SecurityCategoryWrite]): Security category or list of categories to create. + security_category: Security category or list of categories to create. Returns: - SecurityCategory | SecurityCategoryList: The created security category or categories. + The created security category or categories. Example: @@ -78,7 +78,7 @@ async def delete(self, id: int | Sequence[int]) -> None: """`Delete one or more security categories. `_ Args: - id (int | Sequence[int]): ID or list of IDs of security categories to delete. + id: ID or list of IDs of security categories to delete. Example: diff --git a/cognite/client/_api/iam/sessions.py b/cognite/client/_api/iam/sessions.py index c4dd245bd5..aa391c7c48 100644 --- a/cognite/client/_api/iam/sessions.py +++ b/cognite/client/_api/iam/sessions.py @@ -33,13 +33,8 @@ async def create( """`Create a session. `_ Args: - client_credentials (ClientCredentials | None): The client credentials to create the session. This is required - if session_type is set to 'CLIENT_CREDENTIALS'. 
- session_type (SessionType | Literal['DEFAULT']): The type of session to create. Can be - either 'CLIENT_CREDENTIALS', 'TOKEN_EXCHANGE', 'ONESHOT_TOKEN_EXCHANGE' or 'DEFAULT'. - Defaults to 'DEFAULT' which will use -this- AsyncCogniteClient object to create the session. - If this client was created using a token, 'TOKEN_EXCHANGE' will be used, and if - this client was created using client credentials, 'CLIENT_CREDENTIALS' will be used. + client_credentials: The client credentials to create the session. This is required if session_type is set to 'CLIENT_CREDENTIALS'. + session_type: The type of session to create. Can be either 'CLIENT_CREDENTIALS', 'TOKEN_EXCHANGE', 'ONESHOT_TOKEN_EXCHANGE' or 'DEFAULT'. Defaults to 'DEFAULT' which will use -this- AsyncCogniteClient object to create the session. If this client was created using a token, 'TOKEN_EXCHANGE' will be used, and if this client was created using client credentials, 'CLIENT_CREDENTIALS' will be used. Session Types: @@ -48,7 +43,7 @@ async def create( * **one_shot_token_exchange**: Credentials for a session using one-shot token exchange to reuse the user's credentials. One-shot sessions are short-lived sessions that are not refreshed and do not require support for token exchange from the identity provider. Returns: - CreatedSession: The object with token inspection details. + The object with token inspection details. """ if client_credentials is None and isinstance(creds := self._config.credentials, OAuthClientCredentials): client_credentials = ClientCredentials(creds.client_id, creds.client_secret) @@ -86,10 +81,10 @@ async def revoke(self, id: int | Sequence[int]) -> Session | SessionList: """`Revoke access to a session. Revocation of a session may in some cases take up to 1 hour to take effect. `_ Args: - id (int | Sequence[int]): Id or list of session ids + id: Id or list of session ids Returns: - Session | SessionList: List of revoked sessions. 
If the user does not have the sessionsAcl:LIST capability, then only the session IDs will be present in the response. + List of revoked sessions. If the user does not have the sessionsAcl:LIST capability, then only the session IDs will be present in the response. """ ident_sequence = IdentifierSequence.load(ids=id, external_ids=None) @@ -119,10 +114,10 @@ async def retrieve(self, id: int | Sequence[int]) -> Session | SessionList: The request will fail if any of the IDs does not belong to an existing session. Args: - id (int | Sequence[int]): Id or list of session ids + id: Id or list of session ids Returns: - Session | SessionList: Session or list of sessions. + Session or list of sessions. """ identifiers = IdentifierSequence.load(ids=id, external_ids=None) @@ -136,11 +131,11 @@ async def list(self, status: SessionStatus | None = None, limit: int = DEFAULT_L """`List all sessions in the current project. `_ Args: - status (SessionStatus | None): If given, only sessions with the given status are returned. - limit (int): Max number of sessions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + status: If given, only sessions with the given status are returned. + limit: Max number of sessions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - SessionList: a list of sessions in the current project. + a list of sessions in the current project. """ filter = {"status": status.upper()} if status is not None else None return await self._list(list_cls=SessionList, resource_cls=Session, method="GET", filter=filter, limit=limit) diff --git a/cognite/client/_api/iam/token.py b/cognite/client/_api/iam/token.py index 48b477b70e..3f154f8baf 100644 --- a/cognite/client/_api/iam/token.py +++ b/cognite/client/_api/iam/token.py @@ -11,7 +11,7 @@ async def inspect(self) -> TokenInspection: Get details about which projects it belongs to and which capabilities are granted to it. Returns: - TokenInspection: The object with token inspection details.
+ The object with token inspection details. Example: diff --git a/cognite/client/_api/labels.py b/cognite/client/_api/labels.py index 6a1aeff093..79cbdc36bb 100644 --- a/cognite/client/_api/labels.py +++ b/cognite/client/_api/labels.py @@ -54,15 +54,15 @@ async def __call__( """Iterate over Labels Args: - chunk_size (int | None): Number of Labels to return in each chunk. Defaults to yielding one Label a time. - name (str | None): returns the label definitions matching that name - external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified - limit (int | None): Maximum number of label definitions to return. Defaults return all labels. - data_set_ids (int | Sequence[int] | None): return only labels in the data sets with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): return only labels in the data sets with this external id / these external ids. + chunk_size: Number of Labels to return in each chunk. Defaults to yielding one Label at a time. + name: returns the label definitions matching that name + external_id_prefix: filter label definitions with external ids starting with the prefix specified + limit: Maximum number of label definitions to return. Defaults to returning all labels. + data_set_ids: return only labels in the data sets with this id / these ids. + data_set_external_ids: return only labels in the data sets with this external id / these external ids. Yields: - LabelDefinition | LabelDefinitionList: yields Labels one by one or in chunks. + yields Labels one by one or in chunks. """ # noqa: DOC404 data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids) @@ -97,11 +97,11 @@ async def retrieve( """`Retrieve one or more label definitions by external id. `_ Args: - external_id (str | SequenceNotStr[str]): External ID or list of external ids - ignore_unknown_ids (bool): If True, ignore IDs and external IDs that are not found rather than throw an exception.
+ external_id: External ID or list of external ids + ignore_unknown_ids: If True, ignore IDs and external IDs that are not found rather than throw an exception. Returns: - LabelDefinition | LabelDefinitionList | None: The requested label definition(s) + The requested label definition(s) Examples: @@ -137,14 +137,14 @@ async def list( """`List Labels `_ Args: - name (str | None): returns the label definitions matching that name - external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified - data_set_ids (int | Sequence[int] | None): return only labels in the data sets with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): return only labels in the data sets with this external id / these external ids. - limit (int | None): Maximum number of label definitions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + name: returns the label definitions matching that name + external_id_prefix: filter label definitions with external ids starting with the prefix specified + data_set_ids: return only labels in the data sets with this id / these ids. + data_set_external_ids: return only labels in the data sets with this external id / these external ids. + limit: Maximum number of label definitions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - LabelDefinitionList: List of requested Labels + List of requested Labels Examples: @@ -186,10 +186,10 @@ async def create( """`Create one or more label definitions. `_ Args: - label (LabelDefinition | LabelDefinitionWrite | Sequence[LabelDefinition | LabelDefinitionWrite]): The label definition(s) to create. + label: The label definition(s) to create. 
Returns: - LabelDefinition | LabelDefinitionList: Created label definition(s) + Created label definition(s) Raises: TypeError: Function input 'label' is of the wrong type @@ -217,7 +217,7 @@ async def delete(self, external_id: str | SequenceNotStr[str] | None = None) -> """`Delete one or more label definitions `_ Args: - external_id (str | SequenceNotStr[str] | None): One or more label external ids + external_id: One or more label external ids Examples: diff --git a/cognite/client/_api/limits.py b/cognite/client/_api/limits.py index ff9fadc045..0c14531e44 100644 --- a/cognite/client/_api/limits.py +++ b/cognite/client/_api/limits.py @@ -31,13 +31,10 @@ async def retrieve(self, id: str) -> Limit | None: Retrieves a limit value by its `limitId`. Args: - id (str): Limit ID to retrieve. - Limits are identified by an id containing the service name and a service-scoped limit name. - For instance `atlas.monthly_ai_tokens` is the id of the `atlas` service limit `monthly_ai_tokens`. - Service and limit names are always in `lower_snake_case`. + id: Limit ID to retrieve. Limits are identified by an id containing the service name and a service-scoped limit name. For instance `atlas.monthly_ai_tokens` is the id of the `atlas` service limit `monthly_ai_tokens`. Service and limit names are always in `lower_snake_case`. Returns: - Limit | None: The requested limit, or `None` if not found. + The requested limit, or `None` if not found. Examples: @@ -64,11 +61,11 @@ async def list(self, filter: Prefix | None = None, limit: int | None = DEFAULT_L Retrieves all limit values for a specific project. Optionally filter by limit ID prefix using a `Prefix` filter. Args: - filter (Prefix | None): Optional `Prefix` filter to apply on the `limitId` property (only `Prefix` filters are supported). - limit (int | None): Maximum number of limits to return. Defaults to 25. 
Set to None or -1 to return all limits + filter: Optional `Prefix` filter to apply on the `limitId` property (only `Prefix` filters are supported). + limit: Maximum number of limits to return. Defaults to 25. Set to None or -1 to return all limits Returns: - LimitList: List of all limit values in the project. + List of all limit values in the project. Examples: diff --git a/cognite/client/_api/org_apis/principals.py b/cognite/client/_api/org_apis/principals.py index 057bc0ce59..3696e7eb96 100644 --- a/cognite/client/_api/org_apis/principals.py +++ b/cognite/client/_api/org_apis/principals.py @@ -18,8 +18,7 @@ async def me(self) -> Principal: """`Get the current caller's information. `_ Returns: - Principal: The principal of the user running the code, i.e. the - principal *this* AsyncCogniteClient was instantiated with. + The principal of the user running the code, i.e. the principal *this* AsyncCogniteClient was instantiated with. Examples: Get your own principal: @@ -85,12 +84,12 @@ async def retrieve( """`Retrieve principal by reference in the organization `_ Args: - id (str | SequenceNotStr[str] | None): The ID(s) of the principal(s) to retrieve. - external_id (str | SequenceNotStr[str] | None): The external ID(s) of the principal to retrieve. - ignore_unknown_ids (bool): This is only relevant when retrieving multiple principals. If set to True, the method will return the principals that were found and ignore the ones that were not found. If set to False, the method will raise a CogniteAPIError if any of the specified principals were not found. Defaults to False. + id: The ID(s) of the principal(s) to retrieve. + external_id: The external ID(s) of the principal to retrieve. + ignore_unknown_ids: This is only relevant when retrieving multiple principals. If set to True, the method will return the principals that were found and ignore the ones that were not found. 
If set to False, the method will raise a CogniteAPIError if any of the specified principals were not found. Defaults to False. Returns: - Principal | PrincipalList | None: The principal(s) with the specified ID(s) or external ID(s). + The principal(s) with the specified ID(s) or external ID(s). Examples: Retrieve a principal by ID: @@ -114,11 +113,11 @@ async def list(self, types: str | Sequence[str] | None = None, limit: int = DEFA """`List principals in the organization `_ Args: - types (str | Sequence[str] | None): Filter by principal type(s). Defaults to None, which means no filtering. - limit (int): The maximum number of principals to return. Defaults to 25. + types: Filter by principal type(s). Defaults to None, which means no filtering. + limit: The maximum number of principals to return. Defaults to 25. Returns: - PrincipalList: The principal of the user running the code, i.e. the principal *this* CogniteClient was instantiated with. + The principal of the user running the code, i.e. the principal *this* CogniteClient was instantiated with. Examples: List principals in the organization: diff --git a/cognite/client/_api/postgres_gateway/tables.py b/cognite/client/_api/postgres_gateway/tables.py index f2de65c6c0..2032633df3 100644 --- a/cognite/client/_api/postgres_gateway/tables.py +++ b/cognite/client/_api/postgres_gateway/tables.py @@ -38,11 +38,11 @@ async def __call__( Fetches custom tables as they are iterated over, so you keep a limited number of custom tables in memory. Args: - chunk_size (int | None): Number of custom tables to return in each chunk. Defaults to yielding one custom table at a time. - limit (int | None): Maximum number of custom tables to return. Defaults to return all. + chunk_size: Number of custom tables to return in each chunk. Defaults to yielding one custom table at a time. + limit: Maximum number of custom tables to return. Defaults to return all. 
Yields: - pg.Table | pg.TableList: yields Table one by one if chunk_size is not specified, else TableList objects. + yields Table one by one if chunk_size is not specified, else TableList objects. """ async for item in self._list_generator( # type: ignore [call-overload] list_cls=pg.TableList, @@ -63,11 +63,11 @@ async def create(self, username: str, items: pg.TableWrite | Sequence[pg.TableWr """`Create tables `_ Args: - username (str): The name of the username (a.k.a. database) to be managed from the API - items (pg.TableWrite | Sequence[pg.TableWrite]): The table(s) to create + username: The name of the username (a.k.a. database) to be managed from the API + items: The table(s) to create Returns: - pg.Table | pg.TableList: Created tables + Created tables Examples: @@ -109,12 +109,12 @@ async def retrieve( Retrieve a list of Postgres tables for a user by their table names, optionally ignoring unknown table names Args: - username (str): The username (a.k.a. database) to be managed from the API - tablename (str | SequenceNotStr[str]): The name of the table(s) to be retrieved - ignore_unknown_ids (bool): Ignore table names not found + username: The username (a.k.a. database) to be managed from the API + tablename: The name of the table(s) to be retrieved + ignore_unknown_ids: Ignore table names not found Returns: - pg.Table | pg.TableList | None: Foreign tables + Foreign tables Examples: @@ -144,9 +144,9 @@ async def delete( """`Delete postgres table(s) `_ Args: - username (str): The name of the username (a.k.a. database) to be managed from the API - tablename (str | SequenceNotStr[str]): The name of the table(s) to be deleted - ignore_unknown_ids (bool): Ignore table names that are not found + username: The name of the username (a.k.a. database) to be managed from the API + tablename: The name of the table(s) to be deleted + ignore_unknown_ids: Ignore table names that are not found Examples: @@ -178,12 +178,12 @@ async def list( List all tables in a given project. 
Args: - username (str): The name of the username (a.k.a. database) to be managed from the API - include_built_ins (Literal['yes', 'no'] | None): Determines if API should return built-in tables or not - limit (int | None): Limits the number of results to be returned. + username: The name of the username (a.k.a. database) to be managed from the API + include_built_ins: Determines if API should return built-in tables or not + limit: Limits the number of results to be returned. Returns: - pg.TableList: Foreign tables + Foreign tables Examples: diff --git a/cognite/client/_api/postgres_gateway/users.py b/cognite/client/_api/postgres_gateway/users.py index ac86bb2616..4d1ad9e885 100644 --- a/cognite/client/_api/postgres_gateway/users.py +++ b/cognite/client/_api/postgres_gateway/users.py @@ -45,11 +45,11 @@ async def __call__( Fetches user as they are iterated over, so you keep a limited number of users in memory. Args: - chunk_size (int | None): Number of users to return in each chunk. Defaults to yielding one user at a time. - limit (int | None): Maximum number of users to return. Defaults to return all. + chunk_size: Number of users to return in each chunk. Defaults to yielding one user at a time. + limit: Maximum number of users to return. Defaults to return all. Yields: - User | UserList: yields User one by one if chunk_size is not specified, else UserList objects. + yields User one by one if chunk_size is not specified, else UserList objects. """ # noqa: DOC404 async for item in self._list_generator( list_cls=UserList, @@ -72,10 +72,10 @@ async def create(self, user: UserWrite | Sequence[UserWrite]) -> UserCreated | U Create postgres users. Args: - user (UserWrite | Sequence[UserWrite]): The user(s) to create. + user: The user(s) to create. 
Returns: - UserCreated | UserCreatedList: The created user(s) + The created user(s) Examples: @@ -114,10 +114,10 @@ async def update(self, items: UserUpdate | UserWrite | Sequence[UserUpdate | Use Update postgres users Args: - items (UserUpdate | UserWrite | Sequence[UserUpdate | UserWrite]): The user(s) to update. + items: The user(s) to update. Returns: - User | UserList: The updated user(s) + The updated user(s) Examples: @@ -150,9 +150,8 @@ async def delete(self, username: str | SequenceNotStr[str], ignore_unknown_ids: Delete postgres users Args: - username (str | SequenceNotStr[str]): Usernames of the users to delete. - ignore_unknown_ids (bool): Ignore usernames that are not found - + username: Usernames of the users to delete. + ignore_unknown_ids: Ignore usernames that are not found Examples: @@ -186,11 +185,11 @@ async def retrieve(self, username: str | SequenceNotStr[str], ignore_unknown_ids Retrieve a list of postgres users by their usernames, optionally ignoring unknown usernames Args: - username (str | SequenceNotStr[str]): Usernames of the users to retrieve. - ignore_unknown_ids (bool): Ignore usernames that are not found + username: Usernames of the users to retrieve. + ignore_unknown_ids: Ignore usernames that are not found Returns: - User | UserList: The retrieved user(s). + The retrieved user(s). Examples: @@ -215,10 +214,10 @@ async def list(self, limit: int = DEFAULT_LIMIT_READ) -> UserList: List all users in a given project. Args: - limit (int): Limits the number of results to be returned. + limit: Limits the number of results to be returned. Returns: - UserList: A list of users + A list of users Examples: diff --git a/cognite/client/_api/raw/databases.py b/cognite/client/_api/raw/databases.py index f1f9347e9b..7439ff1460 100644 --- a/cognite/client/_api/raw/databases.py +++ b/cognite/client/_api/raw/databases.py @@ -37,11 +37,11 @@ async def __call__( Fetches dbs as they are iterated over, so you keep a limited number of dbs in memory. 
Args: - chunk_size (int | None): Number of dbs to return in each chunk. Defaults to yielding one db a time. - limit (int | None): Maximum number of dbs to return. Defaults to return all items. + chunk_size: Number of dbs to return in each chunk. Defaults to yielding one db at a time. + limit: Maximum number of dbs to return. Defaults to return all items. Yields: - Database | DatabaseList: No description. + Yields Database one by one if chunk_size is not specified, else DatabaseList objects. """ # noqa: DOC404 async for item in self._list_generator( list_cls=DatabaseList, resource_cls=Database, chunk_size=chunk_size, method="GET", limit=limit @@ -58,10 +58,10 @@ async def create(self, name: str | list[str]) -> Database | DatabaseList: """`Create one or more databases. `_ Args: - name (str | list[str]): A db name or list of db names to create. + name: A db name or list of db names to create. Returns: - Database | DatabaseList: Database or list of databases that has been created. + Database or list of databases that has been created. Examples: @@ -83,8 +83,8 @@ async def delete(self, name: str | SequenceNotStr[str], recursive: bool = False) """`Delete one or more databases. `_ Args: - name (str | SequenceNotStr[str]): A db name or list of db names to delete. - recursive (bool): Recursively delete all tables in the database(s). + name: A db name or list of db names to delete. + recursive: Recursively delete all tables in the database(s). Examples: @@ -117,10 +117,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DatabaseList: """`List databases `_ Args: - limit (int | None): Maximum number of databases to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of databases to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - DatabaseList: List of requested databases. + List of requested databases. 
Examples: diff --git a/cognite/client/_api/raw/rows.py b/cognite/client/_api/raw/rows.py index 7a3c5834e6..26dbfd7e16 100644 --- a/cognite/client/_api/raw/rows.py +++ b/cognite/client/_api/raw/rows.py @@ -119,20 +119,17 @@ async def __call__( by halting retrieval speed when the callers code can't keep up. Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - chunk_size (int | None): Number of rows to return in each chunk (may be lower). Defaults to yielding one row at a time. - Note: When used together with 'partitions' the default is 10000 (matching the API limit) and there's an implicit minimum of 1000 rows. - partitions (int | None): Retrieve rows in parallel using this number of workers. Defaults to not use concurrency. - The setting is capped at ``global_config.concurrency_settings.raw.read`` and _can_ be used with a finite limit. To prevent unexpected problems - and maximize read throughput, check out `concurrency limits in the API documentation. `_ - limit (int | None): Maximum number of rows to return. Can be used with partitions. Defaults to returning all items. - min_last_updated_time (int | None): Rows must have been last updated after this time (exclusive). Milliseconds since epoch. - max_last_updated_time (int | None): Rows must have been last updated before this time (inclusive). Milliseconds since epoch. - columns (list[str] | None): List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys. + db_name: Name of the database. + table_name: Name of the table. + chunk_size: Number of rows to return in each chunk (may be lower). Defaults to yielding one row at a time. Note: When used together with 'partitions' the default is 10000 (matching the API limit) and there's an implicit minimum of 1000 rows. + partitions: Retrieve rows in parallel using this number of workers. Defaults to not use concurrency. 
The setting is capped at ``global_config.concurrency_settings.raw.read`` and _can_ be used with a finite limit. To prevent unexpected problems and maximize read throughput, check out `concurrency limits in the API documentation. `_ + limit: Maximum number of rows to return. Can be used with partitions. Defaults to returning all items. + min_last_updated_time: Rows must have been last updated after this time (exclusive). Milliseconds since epoch. + max_last_updated_time: Rows must have been last updated before this time (inclusive). Milliseconds since epoch. + columns: List of column keys. Set to `None` to retrieve all, use empty list, [], to retrieve only row keys. Yields: - Row | RowList: An iterator yielding the requested row or rows. + An iterator yielding the requested row or rows. """ # noqa: DOC404 if partitions is None or _RUNNING_IN_BROWSER: iterator = self._list_generator( @@ -258,10 +255,10 @@ async def insert( """`Insert one or more rows into a table. `_ Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - row (Sequence[Row] | Sequence[RowWrite] | Row | RowWrite | dict): The row(s) to insert - ensure_parent (bool): Create database/table if they don't already exist. + db_name: Name of the database. + table_name: Name of the table. + row: The row(s) to insert + ensure_parent: Create database/table if they don't already exist. Examples: @@ -312,11 +309,11 @@ async def insert_dataframe( Uses index for row keys. Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - dataframe (pd.DataFrame): The dataframe to insert. Index will be used as row keys. - ensure_parent (bool): Create database/table if they don't already exist. - dropna (bool): Remove NaNs (but keep None's in dtype=object columns) before inserting. Done individually per column. Default: True + db_name: Name of the database. + table_name: Name of the table. + dataframe: The dataframe to insert. Index will be used as row keys. 
+ ensure_parent: Create database/table if they don't already exist. + dropna: Remove NaNs (but keep None's in dtype=object columns) before inserting. Done individually per column. Default: True Examples: @@ -383,9 +380,9 @@ async def delete(self, db_name: str, table_name: str, key: str | SequenceNotStr[ """`Delete rows from a table. `_ Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - key (str | SequenceNotStr[str]): The key(s) of the row(s) to delete. + db_name: Name of the database. + table_name: Name of the table. + key: The key(s) of the row(s) to delete. Examples: @@ -422,12 +419,12 @@ async def retrieve(self, db_name: str, table_name: str, key: str) -> Row | None: """`Retrieve a single row by key. `_ Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - key (str): The key of the row to retrieve. + db_name: Name of the database. + table_name: Name of the table. + key: The key of the row to retrieve. Returns: - Row | None: The requested row. + The requested row. Examples: @@ -477,21 +474,18 @@ async def retrieve_dataframe( Rowkeys are used as the index. Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - min_last_updated_time (int | None): Rows must have been last updated after this time. Milliseconds since epoch. - max_last_updated_time (int | None): Rows must have been last updated before this time. Milliseconds since epoch. - columns (list[str] | None): List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys. - limit (int | None): The number of rows to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. - When partitions is not passed, it defaults to 1, i.e. 
no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read`` - for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out - `concurrency limits in the API documentation. `_ - last_updated_time_in_index (bool): Use a MultiIndex with row keys and last_updated_time as index. - infer_dtypes (bool): If True, pandas will try to infer dtypes of the columns. Defaults to True. + db_name: Name of the database. + table_name: Name of the table. + min_last_updated_time: Rows must have been last updated after this time. Milliseconds since epoch. + max_last_updated_time: Rows must have been last updated before this time. Milliseconds since epoch. + columns: List of column keys. Set to `None` to retrieve all, use empty list, [], to retrieve only row keys. + limit: The number of rows to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read`` for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out `concurrency limits in the API documentation. `_ + last_updated_time_in_index: Use a MultiIndex with row keys and last_updated_time as index. + infer_dtypes: If True, pandas will try to infer dtypes of the columns. Defaults to True. Returns: - pd.DataFrame: The requested rows in a pandas dataframe. + The requested rows in a pandas dataframe. Examples: @@ -549,19 +543,16 @@ async def list( """`List rows in a table. `_ Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - min_last_updated_time (int | None): Rows must have been last updated after this time (exclusive). Milliseconds since epoch. 
- max_last_updated_time (int | None): Rows must have been last updated before this time (inclusive). Milliseconds since epoch. - columns (list[str] | None): List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys. - limit (int | None): The number of rows to retrieve. Can be used with partitions. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. - When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read`` - for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out - `concurrency limits in the API documentation. `_ + db_name: Name of the database. + table_name: Name of the table. + min_last_updated_time: Rows must have been last updated after this time (exclusive). Milliseconds since epoch. + max_last_updated_time: Rows must have been last updated before this time (inclusive). Milliseconds since epoch. + columns: List of column keys. Set to `None` to retrieve all, use empty list, [], to retrieve only row keys. + limit: The number of rows to retrieve. Can be used with partitions. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read`` for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out `concurrency limits in the API documentation. `_ Returns: - RowList: The requested rows. + The requested rows. 
Examples: diff --git a/cognite/client/_api/raw/tables.py b/cognite/client/_api/raw/tables.py index d58fa059f9..fbef778055 100644 --- a/cognite/client/_api/raw/tables.py +++ b/cognite/client/_api/raw/tables.py @@ -38,12 +38,12 @@ async def __call__( Fetches tables as they are iterated over, so you keep a limited number of tables in memory. Args: - db_name (str): Name of the database to iterate over tables for - chunk_size (int | None): Number of tables to return in each chunk. Defaults to yielding one table a time. - limit (int | None): Maximum number of tables to return. Defaults to return all items. + db_name: Name of the database to iterate over tables for + chunk_size: Number of tables to return in each chunk. Defaults to yielding one table a time. + limit: Maximum number of tables to return. Defaults to return all items. Yields: - raw.Table | raw.TableList: The tables in the database. + The tables in the database. """ table_iterator = self._list_generator( list_cls=raw.TableList, @@ -66,11 +66,11 @@ async def create(self, db_name: str, name: str | list[str]) -> raw.Table | raw.T """`Create one or more tables. `_ Args: - db_name (str): Database to create the tables in. - name (str | list[str]): A table name or list of table names to create. + db_name: Database to create the tables in. + name: A table name or list of table names to create. Returns: - raw.Table | raw.TableList: raw.Table or list of tables that has been created. + raw.Table or list of tables that has been created. Examples: @@ -99,8 +99,8 @@ async def delete(self, db_name: str, name: str | SequenceNotStr[str]) -> None: """`Delete one or more tables. `_ Args: - db_name (str): Database to delete tables from. - name (str | SequenceNotStr[str]): A table name or list of table names to delete. + db_name: Database to delete tables from. + name: A table name or list of table names to delete. 
Examples: @@ -154,11 +154,11 @@ async def list(self, db_name: str, limit: int | None = DEFAULT_LIMIT_READ) -> ra """`List tables `_ Args: - db_name (str): The database to list tables from. - limit (int | None): Maximum number of tables to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + db_name: The database to list tables from. + limit: Maximum number of tables to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - raw.TableList: List of requested tables. + List of requested tables. Examples: diff --git a/cognite/client/_api/relationships.py b/cognite/client/_api/relationships.py index 0d013e4eb4..ff1218a1e5 100644 --- a/cognite/client/_api/relationships.py +++ b/cognite/client/_api/relationships.py @@ -100,25 +100,25 @@ async def __call__( Fetches relationships as they are iterated over, so you keep a limited number of relationships in memory. Args: - chunk_size (int | None): Number of Relationships to return in each chunk. Defaults to yielding one relationship at a time. - source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their source External Id field - source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their source Type field - target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their target External Id field - target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their target Type field - data_set_ids (int | Sequence[int] | None): Return only relationships in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only relationships in the specified data set(s) with this external id / these external ids. 
- start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) - end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) - confidence (dict[str, int] | None): Range to filter the field for (inclusive). - last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive). - created_time (dict[str, int] | None): Range to filter the field for (inclusive). - active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. - labels (LabelFilter | None): Return only the resource matching the specified label constraints. - limit (int | None): No description. - fetch_resources (bool): No description. + chunk_size: Number of Relationships to return in each chunk. Defaults to yielding one relationship at a time. + source_external_ids: Include relationships that have any of these values in their source External Id field + source_types: Include relationships that have any of these values in their source Type field + target_external_ids: Include relationships that have any of these values in their target External Id field + target_types: Include relationships that have any of these values in their target Type field + data_set_ids: Return only relationships in the specified data set(s) with this id / these ids. 
+ data_set_external_ids: Return only relationships in the specified data set(s) with this external id / these external ids. + start_time: Range between two timestamps, minimum and maximum milliseconds (inclusive) + end_time: Range between two timestamps, minimum and maximum milliseconds (inclusive) + confidence: Range to filter the field for (inclusive). + last_updated_time: Range to filter the field for (inclusive). + created_time: Range to filter the field for (inclusive). + active_at_time: Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime it will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. + labels: Return only the resource matching the specified label constraints. + limit: Maximum number of relationships to return. Defaults to return all items. + fetch_resources: If true, will try to return the full resources referenced by the relationship in the source and target fields. Yields: - Relationship | RelationshipList: yields Relationship one by one if chunk_size is not specified, else RelationshipList objects. + yields Relationship one by one if chunk_size is not specified, else RelationshipList objects. """ # noqa: DOC404 data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids) filter = RelationshipFilter( @@ -156,11 +156,11 @@ async def retrieve(self, external_id: str, fetch_resources: bool = False) -> Rel """Retrieve a single relationship by external id. Args: - external_id (str): External ID - fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the source and target fields. 
+ external_id: External ID + fetch_resources: If true, will try to return the full resources referenced by the relationship in the source and target fields. Returns: - Relationship | None: Requested relationship or None if it does not exist. + Requested relationship or None if it does not exist. Examples: @@ -185,13 +185,12 @@ async def retrieve_multiple( """`Retrieve multiple relationships by external id. `_ Args: - external_ids (SequenceNotStr[str]): External IDs - fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the - source and target fields. - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + external_ids: External IDs + fetch_resources: If true, will try to return the full resources referenced by the relationship in the source and target fields. + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - RelationshipList: The requested relationships. + The requested relationships. Examples: @@ -233,25 +232,25 @@ async def list( """`Lists relationships stored in the project based on a query filter given in the payload of this request. Up to 1000 relationships can be retrieved in one operation. `_ Args: - source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their source External Id field - source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their source Type field - target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their target External Id field - target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their target Type field - data_set_ids (int | Sequence[int] | None): Return only relationships in the specified data set(s) with this id / these ids. 
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only relationships in the specified data set(s) with this external id / these external ids. - start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) - end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) - confidence (dict[str, int] | None): Range to filter the field for (inclusive). - last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive). - created_time (dict[str, int] | None): Range to filter the field for (inclusive). - active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. - labels (LabelFilter | None): Return only the resource matching the specified label constraints. - limit (int | None): Maximum number of relationships to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): Retrieve relationships in parallel using this number of workers. Also requires `limit=None` to be passed. - fetch_resources (bool): if true, will try to return the full resources referenced by the relationship in the source and target fields. 
+ source_external_ids: Include relationships that have any of these values in their source External Id field + source_types: Include relationships that have any of these values in their source Type field + target_external_ids: Include relationships that have any of these values in their target External Id field + target_types: Include relationships that have any of these values in their target Type field + data_set_ids: Return only relationships in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only relationships in the specified data set(s) with this external id / these external ids. + start_time: Range between two timestamps, minimum and maximum milliseconds (inclusive) + end_time: Range between two timestamps, minimum and maximum milliseconds (inclusive) + confidence: Range to filter the field for (inclusive). + last_updated_time: Range to filter the field for (inclusive). + created_time: Range to filter the field for (inclusive). + active_at_time: Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. + labels: Return only the resource matching the specified label constraints. + limit: Maximum number of relationships to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: Retrieve relationships in parallel using this number of workers. Also requires `limit=None` to be passed. 
+ fetch_resources: if true, will try to return the full resources referenced by the relationship in the source and target fields. Returns: - RelationshipList: List of requested relationships + List of requested relationships Examples: @@ -349,10 +348,10 @@ async def create( """`Create one or more relationships. `_ Args: - relationship (Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]): Relationship or list of relationships to create. + relationship: Relationship or list of relationships to create. Returns: - Relationship | RelationshipList: Created relationship(s) + Created relationship(s) Note: - The source_type and target_type field in the Relationship(s) can be any string among "Asset", "TimeSeries", "File", "Event", "Sequence". @@ -419,11 +418,11 @@ async def update( Currently, a full replacement of labels on a relationship is not supported (only partial add/remove updates). See the example below on how to perform partial labels update. Args: - item (Relationship | RelationshipWrite | RelationshipUpdate | Sequence[Relationship | RelationshipWrite | RelationshipUpdate]): Relationship(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Relationship or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Relationship(s) to update + mode: How to update data when a non-update object is given (Relationship or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. 
Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Relationship | RelationshipList: Updated relationship(s) + Updated relationship(s) Examples: Update a data set that you have fetched. This will perform a full update of the data set: @@ -482,11 +481,11 @@ async def upsert( For more details, see :ref:`appendix-upsert`. Args: - item (Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]): Relationship or list of relationships to upsert. - mode (Literal['patch', 'replace']): Whether to patch or replace in the case the relationships are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + item: Relationship or list of relationships to upsert. + mode: Whether to patch or replace in the case the relationships are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. Returns: - Relationship | RelationshipList: The upserted relationship(s). + The upserted relationship(s). Examples: @@ -520,8 +519,8 @@ async def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_id """`Delete one or more relationships. `_ Args: - external_id (str | SequenceNotStr[str]): External ID or list of external ids - ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. + external_id: External ID or list of external ids + ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception. 
Examples: Delete relationships by external id: diff --git a/cognite/client/_api/sequence_data.py b/cognite/client/_api/sequence_data.py index c04adddb06..f3cbd4bc58 100644 --- a/cognite/client/_api/sequence_data.py +++ b/cognite/client/_api/sequence_data.py @@ -45,10 +45,10 @@ async def insert( """`Insert rows into a sequence `_ Args: - rows (SequenceRows | dict[int, typing.Sequence[int | float | str]] | typing.Sequence[tuple[int, typing.Sequence[int | float | str]]] | typing.Sequence[dict[str, Any]]): The rows you wish to insert. Can either be a list of tuples, a list of {"rowNumber":... ,"values": ...} objects, a dictionary of rowNumber: data, or a SequenceData object. See examples below. - columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. - id (int | None): Id of sequence to insert rows into. - external_id (str | None): External id of sequence to insert rows into. + rows: The rows you wish to insert. Can either be a list of tuples, a list of {"rowNumber":... ,"values": ...} objects, a dictionary of rowNumber: data, or a SequenceData object. See examples below. + columns: List of external id for the columns of the sequence. + id: Id of sequence to insert rows into. + external_id: External id of sequence to insert rows into. Examples: Your rows of data can be a list of tuples where the first element is the rownumber and the second element is the data to be inserted: @@ -127,10 +127,10 @@ async def insert_dataframe( The sequence and columns must already exist. Args: - dataframe (pd.DataFrame): Pandas DataFrame object containing the sequence data. - id (int | None): Id of sequence to insert rows into. - external_id (str | None): External id of sequence to insert rows into. - dropna (bool): Whether to drop rows where all values are missing. Default: True. + dataframe: Pandas DataFrame object containing the sequence data. + id: Id of sequence to insert rows into. + external_id: External id of sequence to insert rows into. 
+ dropna: Whether to drop rows where all values are missing. Default: True. Examples: Insert three rows into columns 'col_a' and 'col_b' of the sequence with id=123: @@ -157,9 +157,9 @@ async def delete(self, rows: typing.Sequence[int], id: int | None = None, extern """`Delete rows from a sequence `_ Args: - rows (typing.Sequence[int]): List of row numbers. - id (int | None): Id of sequence to delete rows from. - external_id (str | None): External id of sequence to delete rows from. + rows: List of row numbers. + id: Id of sequence to delete rows from. + external_id: External id of sequence to delete rows from. Examples: @@ -185,10 +185,10 @@ async def delete_range( """`Delete a range of rows from a sequence. Note this operation is potentially slow, as retrieves each row before deleting. `_ Args: - start (int): Row number to start from (inclusive). - end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to delete all rows until end of sequence. - id (int | None): Id of sequence to delete rows from. - external_id (str | None): External id of sequence to delete rows from. + start: Row number to start from (inclusive). + end: Upper limit on the row number (exclusive). Set to None or -1 to delete all rows until end of sequence. + id: Id of sequence to delete rows from. + external_id: External id of sequence to delete rows from. Examples: @@ -286,15 +286,15 @@ async def retrieve( """`Retrieve data from a sequence `_ Args: - external_id (str | SequenceNotStr[str] | None): The external id of the sequence to retrieve from. - id (int | typing.Sequence[int] | None): The internal if the sequence to retrieve from. - start (int): Row number to start from (inclusive). - end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to get all rows until end of sequence. - columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. 
- limit (int | None): Maximum number of rows to return per sequence. Pass None to fetch all (possibly limited by 'end'). + external_id: The external id of the sequence to retrieve from. + id: The internal id of the sequence to retrieve from. + start: Row number to start from (inclusive). + end: Upper limit on the row number (exclusive). Set to None or -1 to get all rows until end of sequence. + columns: List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. + limit: Maximum number of rows to return per sequence. Pass None to fetch all (possibly limited by 'end'). Returns: - SequenceRows | SequenceRowsList: SequenceRows if a single identifier was given, else SequenceRowsList + SequenceRows if a single identifier was given, else SequenceRowsList Examples: @@ -342,13 +342,13 @@ async def retrieve_last_row( """`Retrieves the last row (i.e the row with the highest row number) in a sequence. `_ Args: - id (int | None): Id or list of ids. - external_id (str | None): External id or list of external ids. - columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. - before (int | None): (optional, int): Get latest datapoint before this row number. + id: Id or list of ids. + external_id: External id or list of external ids. + columns: List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. + before: Get latest datapoint before this row number. Returns: - SequenceRows: A Datapoints object containing the requested data, or a list of such objects. + A Datapoints object containing the requested data, or a list of such objects. Examples: @@ -380,16 +380,16 @@ async def retrieve_dataframe( """`Retrieve data from a sequence as a pandas dataframe `_ Args: - start (int): (inclusive) row number to start from. - end (int | None): (exclusive) upper limit on the row number.
Set to None or -1 to get all rows until end of sequence. - columns (list[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. - external_id (str | None): External id of sequence. - column_names (str | None): Which field(s) to use as column header. Can use "externalId", "id", "columnExternalId", "id|columnExternalId" or "externalId|columnExternalId". Default is "externalId|columnExternalId" for queries on more than one sequence, and "columnExternalId" for queries on a single sequence. - id (int | None): Id of sequence - limit (int | None): Maximum number of rows to return per sequence. + start: (inclusive) row number to start from. + end: (exclusive) upper limit on the row number. Set to None or -1 to get all rows until end of sequence. + columns: List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. + external_id: External id of sequence. + column_names: Which field(s) to use as column header. Can use "externalId", "id", "columnExternalId", "id|columnExternalId" or "externalId|columnExternalId". Default is "externalId|columnExternalId" for queries on more than one sequence, and "columnExternalId" for queries on a single sequence. + id: Id of sequence + limit: Maximum number of rows to return per sequence. Returns: - pd.DataFrame: The requested sequence data in a pandas DataFrame + The requested sequence data in a pandas DataFrame Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient diff --git a/cognite/client/_api/sequences.py b/cognite/client/_api/sequences.py index 026b98fce2..9aba2fff3e 100644 --- a/cognite/client/_api/sequences.py +++ b/cognite/client/_api/sequences.py @@ -128,23 +128,23 @@ async def __call__( Fetches sequences as they are iterated over, so you keep a limited number of objects in memory. Args: - chunk_size (int | None): Number of sequences to return in each chunk. Defaults to yielding one event a time. 
- name (str | None): Filter out sequences that do not have this *exact* name. - external_id_prefix (str | None): Filter out sequences that do not have this string as the start of the externalId - metadata (dict[str, str] | None): Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. - asset_ids (typing.Sequence[int] | None): Filter out sequences that are not linked to any of these assets. - asset_subtree_ids (int | typing.Sequence[int] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | typing.Sequence[int] | None): Return only sequences in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only sequences in the specified data set(s) with this external id / these external ids. - created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - limit (int | None): Max number of sequences to return. Defaults to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. 
Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + chunk_size: Number of sequences to return in each chunk. Defaults to yielding one sequence at a time. + name: Filter out sequences that do not have this *exact* name. + external_id_prefix: Filter out sequences that do not have this string as the start of the externalId + metadata: Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. + asset_ids: Filter out sequences that are not linked to any of these assets. + asset_subtree_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only sequences in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only sequences in the specified data set(s) with this external id / these external ids. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit: Max number of sequences to return. Defaults to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used.
Yields: - Sequence | SequenceList: yields Sequence one by one if chunk_size is not specified, else SequenceList objects. + yields Sequence one by one if chunk_size is not specified, else SequenceList objects. """ # noqa: DOC404 asset_subtree_ids_processed = process_asset_subtree_ids(asset_subtree_ids, asset_subtree_external_ids) data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids) @@ -179,11 +179,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None) """`Retrieve a single sequence by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - Sequence | None: Requested sequence or None if it does not exist. + Requested sequence or None if it does not exist. Examples: @@ -210,12 +210,12 @@ async def retrieve_multiple( """`Retrieve multiple sequences by id. `_ Args: - ids (typing.Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - SequenceList: The requested sequences. + The requested sequences. Examples: @@ -243,11 +243,11 @@ async def aggregate_count( """`Count of sequences matching the specified filters and search. `_ Args: - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count. - filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down sequences to count requiring exact match. + advanced_filter: The filter to narrow down the sequences to count. + filter: The filter to narrow down sequences to count requiring exact match. Returns: - int: The number of sequences matching the specified filters and search. + The number of sequences matching the specified filters and search. 
Examples: @@ -284,13 +284,13 @@ async def aggregate_cardinality_values( """`Find approximate property count for sequences. `_ Args: - property (SequenceProperty | str | list[str]): The property to count the cardinality of. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match. + property: The property to count the cardinality of. + advanced_filter: The filter to narrow down the sequences to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the sequences to count requiring exact match. Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -334,13 +334,13 @@ async def aggregate_cardinality_properties( """`Find approximate paths count for sequences. `_ Args: - path (SequenceProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the sequences to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. 
+ filter: The filter to narrow down the sequences to count requiring exact match. Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -372,13 +372,13 @@ async def aggregate_unique_values( """`Get unique paths with counts for sequences. `_ Args: - property (SequenceProperty | str | list[str]): The property to group by. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match. + property: The property to group by. + advanced_filter: The filter to narrow down the sequences to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the sequences to count requiring exact match. Returns: - UniqueResultList: List of unique values of sequences matching the specified filters and search. + List of unique values of sequences matching the specified filters and search. Examples: @@ -440,13 +440,13 @@ async def aggregate_unique_properties( """`Find approximate unique sequence properties. `_ Args: - path (SequenceProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match. + path: The scope in every document to aggregate properties. 
The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the sequences to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the sequences to count requiring exact match. Returns: - UniqueResultList: List of unique values of sequences matching the specified filters and search. + List of unique values of sequences matching the specified filters and search. Examples: @@ -480,10 +480,10 @@ async def create( """`Create one or more sequences. `_ Args: - sequence (Sequence | SequenceWrite | typing.Sequence[Sequence] | typing.Sequence[SequenceWrite]): Sequence or list of Sequence to create. The Sequence columns parameter is a list of objects with fields `externalId` (external id of the column, when omitted, they will be given ids of 'column0, column1, ...'), `valueType` (data type of the column, either STRING, LONG, or DOUBLE, with default DOUBLE), `name`, `description`, `metadata` (optional fields to describe and store information about the data in the column). Other fields will be removed automatically, so a columns definition from a different sequence object can be passed here. + sequence: Sequence or list of Sequence to create. The Sequence columns parameter is a list of objects with fields `externalId` (external id of the column, when omitted, they will be given ids of 'column0, column1, ...'), `valueType` (data type of the column, either STRING, LONG, or DOUBLE, with default DOUBLE), `name`, `description`, `metadata` (optional fields to describe and store information about the data in the column). Other fields will be removed automatically, so a columns definition from a different sequence object can be passed here. Returns: - Sequence | SequenceList: The created sequence(s). + The created sequence(s). Examples: @@ -519,9 +519,9 @@ async def delete( """`Delete one or more sequences. 
`_ Args: - id (int | typing.Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: External ID or list of external ids + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -560,11 +560,11 @@ async def update( """`Update one or more sequences. `_ Args: - item (Sequence | SequenceWrite | SequenceUpdate | typing.Sequence[Sequence | SequenceWrite | SequenceUpdate]): Sequences to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Sequence or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Sequences to update + mode: How to update data when a non-update object is given (Sequence or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Sequence | SequenceList: Updated sequences. + Updated sequences. Examples: @@ -662,11 +662,11 @@ async def upsert( For more details, see :ref:`appendix-upsert`. Args: - item (Sequence | SequenceWrite | typing.Sequence[Sequence | SequenceWrite]): Sequence or list of sequences to upsert. 
- mode (Literal['patch', 'replace']): Whether to patch or replace in the case the sequences are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + item: Sequence or list of sequences to upsert. + mode: Whether to patch or replace in the case the sequences are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. Returns: - Sequence | SequenceList: The upserted sequence(s). + The upserted sequence(s). Examples: @@ -795,14 +795,14 @@ async def search( Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. Args: - name (str | None): Prefix and fuzzy search on name. - description (str | None): Prefix and fuzzy search on description. - query (str | None): Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other' - filter (SequenceFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. - limit (int): Max number of results to return. + name: Prefix and fuzzy search on name. + description: Prefix and fuzzy search on description. + query: Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other' + filter: Filter to apply. Performs exact match on these fields. + limit: Max number of results to return. Returns: - SequenceList: The search result as a SequenceList + The search result as a SequenceList Examples: @@ -843,23 +843,23 @@ async def list( """`List sequences `_ Args: - name (str | None): Filter out sequences that do not have this *exact* name. 
- external_id_prefix (str | None): Filter out sequences that do not have this string as the start of the externalId - metadata (dict[str, str] | None): Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. - asset_ids (typing.Sequence[int] | None): Filter out sequences that are not linked to any of these assets. - asset_subtree_ids (int | typing.Sequence[int] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | typing.Sequence[int] | None): Return only sequences in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only sequences in the specified data set(s) with this external id / these external ids. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - limit (int | None): Max number of sequences to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). 
It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + name: Filter out sequences that do not have this *exact* name. + external_id_prefix: Filter out sequences that do not have this string as the start of the externalId + metadata: Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. + asset_ids: Filter out sequences that are not linked to any of these assets. + asset_subtree_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only sequences in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only sequences in the specified data set(s) with this external id / these external ids. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit: Max number of sequences to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). 
It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. Returns: - SequenceList: The requested sequences. + The requested sequences. .. note:: When using `partitions`, there are few considerations to keep in mind: diff --git a/cognite/client/_api/simulators/__init__.py b/cognite/client/_api/simulators/__init__.py index 3886397c0b..c377825a77 100644 --- a/cognite/client/_api/simulators/__init__.py +++ b/cognite/client/_api/simulators/__init__.py @@ -46,11 +46,11 @@ async def __call__( Fetches simulators as they are iterated over, so you keep a limited number of simulators in memory. Args: - chunk_size (int | None): Number of simulators to return in each chunk. Defaults to yielding one simulator a time. - limit (int | None): Maximum number of simulators to return. Defaults to return all items. + chunk_size: Number of simulators to return in each chunk. Defaults to yielding one simulator a time. + limit: Maximum number of simulators to return. Defaults to return all items. Yields: - Simulator | SimulatorList: yields Simulator one by one if chunk is not specified, else SimulatorList objects. + yields Simulator one by one if chunk is not specified, else SimulatorList objects. """ # noqa: DOC404 async for item in self._list_generator( list_cls=SimulatorList, @@ -65,10 +65,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SimulatorList: """`List all simulators `_ Args: - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. 
Returns: - SimulatorList: List of simulators + List of simulators Examples: List simulators: diff --git a/cognite/client/_api/simulators/integrations.py b/cognite/client/_api/simulators/integrations.py index 93dcb515dd..4fd0371e6b 100644 --- a/cognite/client/_api/simulators/integrations.py +++ b/cognite/client/_api/simulators/integrations.py @@ -56,13 +56,13 @@ async def __call__( Fetches simulator integrations as they are iterated over, so you keep a limited number of simulator integrations in memory. Args: - chunk_size (int | None): Number of simulator integrations to return in each chunk. Defaults to yielding one simulator integration a time. - simulator_external_ids (str | SequenceNotStr[str] | None): Filter on simulator external ids. - active (bool | None): Filter on active status of the simulator integration. - limit (int | None): The maximum number of simulator integrations to return, pass None to return all. + chunk_size: Number of simulator integrations to return in each chunk. Defaults to yielding one simulator integration a time. + simulator_external_ids: Filter on simulator external ids. + active: Filter on active status of the simulator integration. + limit: The maximum number of simulator integrations to return, pass None to return all. Yields: - SimulatorIntegration | SimulatorIntegrationList: yields SimulatorIntegration one by one if chunk_size is not specified, else SimulatorIntegrationList objects. + yields SimulatorIntegration one by one if chunk_size is not specified, else SimulatorIntegrationList objects. """ # noqa: DOC404 integrations_filter = SimulatorIntegrationFilter(simulator_external_ids=simulator_external_ids, active=active) async for item in self._list_generator( @@ -86,12 +86,12 @@ async def list( Retrieves a list of simulator integrations that match the given criteria. Args: - limit (int | None): The maximum number of simulator integrations to return, pass None to return all. 
- simulator_external_ids (str | SequenceNotStr[str] | None): Filter on simulator external ids. - active (bool | None): Filter on active status of the simulator integration. + limit: The maximum number of simulator integrations to return, pass None to return all. + simulator_external_ids: Filter on simulator external ids. + active: Filter on active status of the simulator integration. Returns: - SimulatorIntegrationList: List of simulator integrations + List of simulator integrations Examples: List a few simulator integrations: @@ -128,8 +128,8 @@ async def delete( """`Delete simulator integrations `_ Args: - ids (int | Sequence[int] | None): Id(s) of simulator integrations to delete - external_ids (str | SequenceNotStr[str] | None): External_id(s) of simulator integrations to delete + ids: Id(s) of simulator integrations to delete + external_ids: External_id(s) of simulator integrations to delete Examples: Delete simulator integrations by id or external id: diff --git a/cognite/client/_api/simulators/logs.py b/cognite/client/_api/simulators/logs.py index 12fc4a4516..37d011c659 100644 --- a/cognite/client/_api/simulators/logs.py +++ b/cognite/client/_api/simulators/logs.py @@ -41,10 +41,10 @@ async def retrieve(self, ids: int | Sequence[int]) -> SimulatorLogList | Simulat They help users identify issues, diagnose problems, and gain insights into the behavior of the simulator integrations. Args: - ids (int | Sequence[int]): The ids of the simulator log. + ids: The ids of the simulator log. 
Returns: - SimulatorLogList | SimulatorLog | None: Requested simulator log(s) + Requested simulator log(s) Examples: Get simulator logs by simulator model id: diff --git a/cognite/client/_api/simulators/models.py b/cognite/client/_api/simulators/models.py index 8acba1e0df..6dcc1513d7 100644 --- a/cognite/client/_api/simulators/models.py +++ b/cognite/client/_api/simulators/models.py @@ -46,12 +46,12 @@ async def list( Retrieves a list of simulator models that match the given criteria. Args: - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. - simulator_external_ids (str | SequenceNotStr[str] | None): Filter by simulator external id(s). - sort (PropertySort | None): The criteria to sort by. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + simulator_external_ids: Filter by simulator external id(s). + sort: The criteria to sort by. Returns: - SimulatorModelList: List of simulator models + List of simulator models Examples: List simulator models: @@ -109,11 +109,11 @@ async def retrieve( Retrieve one or more simulator models by ID(s) or external ID(s). Args: - ids (int | Sequence[int] | None): The id of the simulator model(s). - external_ids (str | SequenceNotStr[str] | None): The external id of the simulator model(s). + ids: The id of the simulator model(s). + external_ids: The external id of the simulator model(s). Returns: - SimulatorModel | SimulatorModelList | None: Requested simulator model(s) + Requested simulator model(s) Examples: Get simulator model by id: @@ -171,13 +171,13 @@ async def __call__( Fetches simulator models as they are iterated over, so you keep a limited number of simulator models in memory. Args: - chunk_size (int | None): Number of simulator models to return in each chunk. Defaults to yielding one simulator model a time. 
- simulator_external_ids (str | SequenceNotStr[str] | None): Filter by simulator external id(s). - sort (PropertySort | None): The criteria to sort by. - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + chunk_size: Number of simulator models to return in each chunk. Defaults to yielding one simulator model a time. + simulator_external_ids: Filter by simulator external id(s). + sort: The criteria to sort by. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. Yields: - SimulatorModel | SimulatorModelList: yields SimulatorModel one by one if chunk is not specified, else SimulatorModelList objects. + yields SimulatorModel one by one if chunk is not specified, else SimulatorModelList objects. """ # noqa: DOC404 model_filter = SimulatorModelsFilter(simulator_external_ids=simulator_external_ids) async for item in self._list_generator( @@ -203,10 +203,10 @@ async def create( """`Create simulator models `_ Args: - items (SimulatorModelWrite | Sequence[SimulatorModelWrite]): The model(s) to create. + items: The model(s) to create. Returns: - SimulatorModel | SimulatorModelList: Created simulator model(s) + Created simulator model(s) Examples: Create new simulator models: @@ -244,8 +244,8 @@ async def delete( """`Delete simulator models `_ Args: - ids (int | Sequence[int] | None): id (or sequence of ids) for the model(s) to delete. - external_ids (str | SequenceNotStr[str] | None): external id (or sequence of external ids) for the model(s) to delete. + ids: id (or sequence of ids) for the model(s) to delete. + external_ids: external id (or sequence of external ids) for the model(s) to delete. 
Examples: Delete simulator models by id or external id: @@ -282,10 +282,10 @@ async def update( """`Update simulator models `_ Args: - items (SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate | Sequence[SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate]): The model to update. + items: The model to update. Returns: - SimulatorModel | SimulatorModelList: Updated simulator model(s) + Updated simulator model(s) Examples: Update a simulator model that you have fetched. This will perform a full update of the model: diff --git a/cognite/client/_api/simulators/models_revisions.py b/cognite/client/_api/simulators/models_revisions.py index 5c6e3ae479..fdde26b4d2 100644 --- a/cognite/client/_api/simulators/models_revisions.py +++ b/cognite/client/_api/simulators/models_revisions.py @@ -50,15 +50,15 @@ async def list( Retrieves a list of simulator model revisions that match the given criteria. Args: - limit (int): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. - sort (PropertySort | None): The criteria to sort by. - model_external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator models to filter by. - all_versions (bool | None): If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned. - created_time (TimestampRange | None): Filter by created time. - last_updated_time (TimestampRange | None): Filter by last updated time. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + sort: The criteria to sort by. + model_external_ids: The external ids of the simulator models to filter by. + all_versions: If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned. + created_time: Filter by created time. + last_updated_time: Filter by last updated time. 
Returns: - SimulatorModelRevisionList: List of simulator model revisions + List of simulator model revisions Examples: List simulator model revisions: @@ -118,11 +118,11 @@ async def retrieve( Retrieve one or more simulator model revisions by ID(s) or external ID(s). Args: - ids (int | Sequence[int] | None): The ids of the simulator model revisions. - external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator model revisions. + ids: The ids of the simulator model revisions. + external_ids: The external ids of the simulator model revisions. Returns: - SimulatorModelRevision | SimulatorModelRevisionList | None: Requested simulator model revision(s) + Requested simulator model revision(s) Examples: Get simulator model revision by id: @@ -191,16 +191,16 @@ async def __call__( Fetches simulator model revisions as they are iterated over, so you keep a limited number of simulator model revisions in memory. Args: - chunk_size (int | None): Number of simulator model revisions to return in each chunk. Defaults to yielding one simulator model revision a time. - sort (PropertySort | None): The criteria to sort by. - model_external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator models to filter by. - all_versions (bool | None): If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned. - created_time (TimestampRange | None): Filter by created time. - last_updated_time (TimestampRange | None): Filter by last updated time. - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + chunk_size: Number of simulator model revisions to return in each chunk. Defaults to yielding one simulator model revision a time. + sort: The criteria to sort by. + model_external_ids: The external ids of the simulator models to filter by. + all_versions: If True, all versions of the simulator model revisions are returned. 
If False, only the latest version is returned. + created_time: Filter by created time. + last_updated_time: Filter by last updated time. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. Yields: - SimulatorModelRevision | SimulatorModelRevisionList: yields SimulatorModelRevision one by one if chunk is not specified, else SimulatorModelRevisionList objects. + yields SimulatorModelRevision one by one if chunk is not specified, else SimulatorModelRevisionList objects. """ # noqa: DOC404 model_revisions_filter = SimulatorModelRevisionsFilter( model_external_ids=model_external_ids, @@ -231,10 +231,10 @@ async def create( """`Create simulator model revisions `_ Args: - items (SimulatorModelRevisionWrite | Sequence[SimulatorModelRevisionWrite]): The model revision(s) to create. + items: The model revision(s) to create. Returns: - SimulatorModelRevision | SimulatorModelRevisionList: Created simulator model revision(s) + Created simulator model revision(s) Examples: Create new simulator model revisions: @@ -280,9 +280,9 @@ async def retrieve_data(self, model_revision_external_id: str) -> SimulatorModel Retrieves a list of simulator model revisions data that match the given criteria. Args: - model_revision_external_id (str): The external id of the simulator model revision to filter by. + model_revision_external_id: The external id of the simulator model revision to filter by. 
Returns: - SimulatorModelRevisionDataList: List of simulator model revision data + List of simulator model revision data Examples: List simulator model revision data: diff --git a/cognite/client/_api/simulators/routine_revisions.py b/cognite/client/_api/simulators/routine_revisions.py index e8f8a16f4c..7b7a980404 100644 --- a/cognite/client/_api/simulators/routine_revisions.py +++ b/cognite/client/_api/simulators/routine_revisions.py @@ -84,20 +84,20 @@ async def __call__( Fetches simulator routine revisions as they are iterated over, so you keep a limited number of simulator routine revisions in memory. Args: - chunk_size (int | None): Number of simulator routine revisions to return in each chunk. Defaults to yielding one simulator routine revision a time. - routine_external_ids (SequenceNotStr[str] | None): Filter on routine external ids. - model_external_ids (SequenceNotStr[str] | None): Filter on model external ids. - simulator_integration_external_ids (SequenceNotStr[str] | None): Filter on simulator integration external ids. - simulator_external_ids (SequenceNotStr[str] | None): Filter on simulator external ids. - kind (Literal['long'] | None): Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' set to 'True' in the same query. - created_time (TimestampRange | None): Filter on created time. - all_versions (bool): If all versions of the routine should be returned. Defaults to false which only returns the latest version. - include_all_fields (bool): If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response. - limit (int | None): Maximum number of simulator routine revisions to return. Defaults to return all items. - sort (PropertySort | None): The criteria to sort by. + chunk_size: Number of simulator routine revisions to return in each chunk. Defaults to yielding one simulator routine revision a time. 
+ routine_external_ids: Filter on routine external ids. + model_external_ids: Filter on model external ids. + simulator_integration_external_ids: Filter on simulator integration external ids. + simulator_external_ids: Filter on simulator external ids. + kind: Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' set to 'True' in the same query. + created_time: Filter on created time. + all_versions: If all versions of the routine should be returned. Defaults to false which only returns the latest version. + include_all_fields: If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response. + limit: Maximum number of simulator routine revisions to return. Defaults to return all items. + sort: The criteria to sort by. Yields: - SimulatorRoutineRevision | SimulatorRoutineRevisionList: yields SimulatorRoutineRevision one by one if chunk is not specified, else SimulatorRoutineRevisionList objects. + yields SimulatorRoutineRevision one by one if chunk is not specified, else SimulatorRoutineRevisionList objects. """ # noqa: DOC404 self._warning.warn() filter = SimulatorRoutineRevisionsFilter( @@ -145,11 +145,11 @@ async def retrieve( Retrieve simulator routine revisions by ID or External Id. 
Args: - ids (int | Sequence[int] | None): Simulator routine revision ID or list of IDs - external_ids (str | SequenceNotStr[str] | None): Simulator routine revision External ID or list of external IDs + ids: Simulator routine revision ID or list of IDs + external_ids: Simulator routine revision External ID or list of external IDs Returns: - SimulatorRoutineRevision | SimulatorRoutineRevisionList | None: Requested simulator routine revision + Requested simulator routine revision Examples: Get simulator routine revision by id: @@ -183,10 +183,10 @@ async def create( """`Create simulator routine revisions `_ Args: - items (SimulatorRoutineRevisionWrite | Sequence[SimulatorRoutineRevisionWrite]): Simulator routine revisions to create. + items: Simulator routine revisions to create. Returns: - SimulatorRoutineRevision | SimulatorRoutineRevisionList: Created simulator routine revision(s) + Created simulator routine revision(s) Examples: Create new simulator routine revisions: @@ -325,19 +325,19 @@ async def list( Retrieves a list of simulator routine revisions that match the given criteria. Args: - routine_external_ids (SequenceNotStr[str] | None): Filter on routine external ids. - model_external_ids (SequenceNotStr[str] | None): Filter on model external ids. - simulator_integration_external_ids (SequenceNotStr[str] | None): Filter on simulator integration external ids. - simulator_external_ids (SequenceNotStr[str] | None): Filter on simulator external ids. - kind (Literal['long'] | None): Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' set to 'True' in the same query. - created_time (TimestampRange | None): Filter on created time. - all_versions (bool): If all versions of the routine should be returned. Defaults to false which only returns the latest version. - include_all_fields (bool): If all fields should be included in the response. 
Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response. - limit (int | None): Maximum number of simulator routine revisions to return. Defaults to return all items. - sort (PropertySort | None): The criteria to sort by. + routine_external_ids: Filter on routine external ids. + model_external_ids: Filter on model external ids. + simulator_integration_external_ids: Filter on simulator integration external ids. + simulator_external_ids: Filter on simulator external ids. + kind: Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' set to 'True' in the same query. + created_time: Filter on created time. + all_versions: If all versions of the routine should be returned. Defaults to false which only returns the latest version. + include_all_fields: If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response. + limit: Maximum number of simulator routine revisions to return. Defaults to return all items. + sort: The criteria to sort by. Returns: - SimulatorRoutineRevisionList: List of simulator routine revisions + List of simulator routine revisions Examples: List simulator routine revisions: diff --git a/cognite/client/_api/simulators/routines.py b/cognite/client/_api/simulators/routines.py index 08cb66174f..7e04419465 100644 --- a/cognite/client/_api/simulators/routines.py +++ b/cognite/client/_api/simulators/routines.py @@ -71,14 +71,14 @@ async def __call__( Fetches simulator routines as they are iterated over, so you keep a limited number of simulator routines in memory. Args: - chunk_size (int | None): Number of simulator routines to return in each chunk. Defaults to yielding one simulator routine a time. - model_external_ids (Sequence[str] | None): Filter on model external ids. 
- simulator_integration_external_ids (Sequence[str] | None): Filter on simulator integration external ids. - sort (PropertySort | None): The criteria to sort by. - limit (int | None): Maximum number of simulator routines to return. Defaults to return all items. + chunk_size: Number of simulator routines to return in each chunk. Defaults to yielding one simulator routine a time. + model_external_ids: Filter on model external ids. + simulator_integration_external_ids: Filter on simulator integration external ids. + sort: The criteria to sort by. + limit: Maximum number of simulator routines to return. Defaults to return all items. Yields: - SimulatorRoutine | SimulatorRoutineList: yields SimulatorRoutine one by one if chunk is not specified, else SimulatorRoutineList objects. + yields SimulatorRoutine one by one if chunk is not specified, else SimulatorRoutineList objects. """ # noqa: DOC404 self._warning.warn() routines_filter = SimulatorRoutinesFilter( @@ -109,10 +109,10 @@ async def create( """`Create simulator routines `_ Args: - routine (SimulatorRoutineWrite | Sequence[SimulatorRoutineWrite]): Simulator routine(s) to create. + routine: Simulator routine(s) to create. Returns: - SimulatorRoutine | SimulatorRoutineList: Created simulator routine(s) + Created simulator routine(s) Examples: Create new simulator routines: @@ -156,8 +156,8 @@ async def delete( """`Delete simulator routines `_ Args: - ids (int | Sequence[int] | None): ids (or sequence of ids) for the routine(s) to delete. - external_ids (str | SequenceNotStr[str] | SequenceNotStr[str] | None): external ids (or sequence of external ids) for the routine(s) to delete. + ids: ids (or sequence of ids) for the routine(s) to delete. + external_ids: external ids (or sequence of external ids) for the routine(s) to delete. Examples: Delete simulator routines by id or external id: @@ -185,14 +185,14 @@ async def list( Retrieves a list of simulator routines that match the given criteria. 
Args: - limit (int): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. - model_external_ids (Sequence[str] | None): Filter on model external ids. - simulator_integration_external_ids (Sequence[str] | None): Filter on simulator integration external ids. - kind (Literal['long'] | None): Filter on routine kind. - sort (PropertySort | None): The criteria to sort by. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + model_external_ids: Filter on model external ids. + simulator_integration_external_ids: Filter on simulator integration external ids. + kind: Filter on routine kind. + sort: The criteria to sort by. Returns: - SimulatorRoutineList: List of simulator routines + List of simulator routines Examples: List simulator routines: @@ -285,21 +285,18 @@ async def run( 2. By routine revision external ID + model revision external ID Args: - routine_external_id (str | None): External id of the simulator routine to run. - Cannot be specified together with routine_revision_external_id and model_revision_external_id. - routine_revision_external_id (str | None): External id of the simulator routine revision to run. - Must be specified together with model_revision_external_id. - model_revision_external_id (str | None): External id of the simulator model revision. - Must be specified together with routine_revision_external_id. - inputs (Sequence[SimulationInputOverride] | None): List of input overrides - run_time (int | None): Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling. - queue (bool | None): Queue the simulation run when connector is down. - log_severity (Literal['Debug', 'Information', 'Warning', 'Error'] | None): Override the minimum severity level for the simulation run logs. If not provided, the minimum severity is read from the connector logger configuration. 
- wait (bool): Wait until the simulation run is finished. Defaults to True. - timeout (float): Timeout in seconds for waiting for the simulation run to finish. Defaults to 60 seconds. + routine_external_id: External id of the simulator routine to run. Cannot be specified together with routine_revision_external_id and model_revision_external_id. + routine_revision_external_id: External id of the simulator routine revision to run. Must be specified together with model_revision_external_id. + model_revision_external_id: External id of the simulator model revision. Must be specified together with routine_revision_external_id. + inputs: List of input overrides + run_time: Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling. + queue: Queue the simulation run when connector is down. + log_severity: Override the minimum severity level for the simulation run logs. If not provided, the minimum severity is read from the connector logger configuration. + wait: Wait until the simulation run is finished. Defaults to True. + timeout: Timeout in seconds for waiting for the simulation run to finish. Defaults to 60 seconds. Returns: - SimulationRun: Created simulation run + Created simulation run Examples: Create new simulation run using routine external ID: diff --git a/cognite/client/_api/simulators/runs.py b/cognite/client/_api/simulators/runs.py index 57e6309616..345cde45c7 100644 --- a/cognite/client/_api/simulators/runs.py +++ b/cognite/client/_api/simulators/runs.py @@ -99,22 +99,22 @@ async def __call__( Fetches simulation runs as they are iterated over, so you keep a limited number of simulation runs in memory. Args: - chunk_size (int | None): Number of simulation runs to return in each chunk. Defaults to yielding one simulation run a time. - limit (int | None): The maximum number of simulation runs to return, pass None to return all. 
- status (str | None): Filter by simulation run status - run_type (str | None): Filter by simulation run type - model_external_ids (SequenceNotStr[str] | None): Filter by simulator model external ids - simulator_integration_external_ids (SequenceNotStr[str] | None): Filter by simulator integration external ids - simulator_external_ids (SequenceNotStr[str] | None): Filter by simulator external ids - routine_external_ids (SequenceNotStr[str] | None): Filter by routine external ids - routine_revision_external_ids (SequenceNotStr[str] | None): Filter by routine revision external ids - model_revision_external_ids (SequenceNotStr[str] | None): Filter by model revision external ids - created_time (TimestampRange | None): Filter by created time - simulation_time (TimestampRange | None): Filter by simulation time - sort (SimulationRunsSort | None): The criteria to sort by. + chunk_size: Number of simulation runs to return in each chunk. Defaults to yielding one simulation run at a time. + limit: The maximum number of simulation runs to return, pass None to return all. + status: Filter by simulation run status + run_type: Filter by simulation run type + model_external_ids: Filter by simulator model external ids + simulator_integration_external_ids: Filter by simulator integration external ids + simulator_external_ids: Filter by simulator external ids + routine_external_ids: Filter by routine external ids + routine_revision_external_ids: Filter by routine revision external ids + model_revision_external_ids: Filter by model revision external ids + created_time: Filter by created time + simulation_time: Filter by simulation time + sort: The criteria to sort by. Yields: - SimulationRun | SimulationRunList: yields Simulation Run one by one if chunk is not specified, else SimulatorRunsList objects. + yields SimulationRun one by one if chunk_size is not specified, else SimulationRunList objects. 
""" # noqa: DOC404 filter_runs = SimulatorRunsFilter( status=status, @@ -160,21 +160,21 @@ async def list( Retrieves a list of simulation runs that match the given criteria. Args: - limit (int | None): The maximum number of simulation runs to return, pass None to return all. - status (str | None): Filter by simulation run status - run_type (str | None): Filter by simulation run type - model_external_ids (SequenceNotStr[str] | None): Filter by simulator model external ids - simulator_integration_external_ids (SequenceNotStr[str] | None): Filter by simulator integration external ids - simulator_external_ids (SequenceNotStr[str] | None): Filter by simulator external ids - routine_external_ids (SequenceNotStr[str] | None): Filter by routine external ids - routine_revision_external_ids (SequenceNotStr[str] | None): Filter by routine revision external ids - model_revision_external_ids (SequenceNotStr[str] | None): Filter by model revision external ids - created_time (TimestampRange | None): Filter by created time - simulation_time (TimestampRange | None): Filter by simulation time - sort (SimulationRunsSort | None): The criteria to sort by. + limit: The maximum number of simulation runs to return, pass None to return all. + status: Filter by simulation run status + run_type: Filter by simulation run type + model_external_ids: Filter by simulator model external ids + simulator_integration_external_ids: Filter by simulator integration external ids + simulator_external_ids: Filter by simulator external ids + routine_external_ids: Filter by routine external ids + routine_revision_external_ids: Filter by routine revision external ids + model_revision_external_ids: Filter by model revision external ids + created_time: Filter by created time + simulation_time: Filter by simulation time + sort: The criteria to sort by. 
Returns: - SimulationRunList: List of simulation runs + List of simulation runs Examples: List simulation runs: @@ -239,10 +239,10 @@ async def retrieve( """`Retrieve simulation runs by ID `_ Args: - ids (int | Sequence[int]): The ID(s) of the simulation run(s) to retrieve. + ids: The ID(s) of the simulation run(s) to retrieve. Returns: - SimulationRun | SimulationRunList | None: The simulation run(s) with the given ID(s) + The simulation run(s) with the given ID(s) Examples: Retrieve a single simulation run by id: @@ -272,10 +272,10 @@ async def create( """`Create simulation runs `_ Args: - items (SimulationRunWrite | Sequence[SimulationRunWrite]): The simulation run(s) to execute. + items: The simulation run(s) to execute. Returns: - SimulationRun | SimulationRunList: Created simulation run(s) + Created simulation run(s) Examples: Create new simulation run: @@ -311,10 +311,10 @@ async def list_run_data( Retrieve data associated with a simulation run by ID. Args: - run_id (int): Simulation run id. + run_id: Simulation run id. Returns: - SimulationRunDataList: List of simulation run data + List of simulation run data Examples: Get simulation run data by run id: diff --git a/cognite/client/_api/synthetic_time_series.py b/cognite/client/_api/synthetic_time_series.py index 3adbb99d74..6e7e867edb 100644 --- a/cognite/client/_api/synthetic_time_series.py +++ b/cognite/client/_api/synthetic_time_series.py @@ -104,21 +104,19 @@ async def query( You can read the guide to synthetic time series in our `documentation `_. Args: - expressions (str | sympy.Basic | Sequence[str] | Sequence[sympy.Basic]): Functions to be calculated. Supports both strings and sympy expressions. Strings can have either the API `ts{}` syntax, or contain variable names to be replaced using the `variables` parameter. - start (int | str | datetime.datetime): Inclusive start. - end (int | str | datetime.datetime): Exclusive end. - limit (int | None): Number of datapoints per expression to retrieve. 
- variables (Mapping[str | sympy.Symbol, str | NodeId | TimeSeries | TimeSeriesWrite] | None): An optional map of symbol replacements. - aggregate (str | None): use this aggregate when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax. - granularity (str | None): use this granularity with the aggregate. - target_unit (str | None): use this target_unit when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax. - target_unit_system (str | None): Same as target_unit, but with unit system (e.g. SI). Only one of target_unit and target_unit_system can be specified. - timezone (str | datetime.timezone | ZoneInfo | None): The timezone to use when aggregating datapoints. For aggregates of granularity 'hour' and longer, - which time zone should we align to. Align to the start of the hour, start of the day or start of the month. For time zones of type Region/Location, - the aggregate duration can vary, typically due to daylight saving time. For time zones of type UTC+/-HH:MM, use increments of 15 minutes. Default: "UTC" (None) + expressions: Functions to be calculated. Supports both strings and sympy expressions. Strings can have either the API `ts{}` syntax, or contain variable names to be replaced using the `variables` parameter. + start: Inclusive start. + end: Exclusive end. + limit: Number of datapoints per expression to retrieve. + variables: An optional map of symbol replacements. + aggregate: use this aggregate when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax. + granularity: use this granularity with the aggregate. + target_unit: use this target_unit when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax. + target_unit_system: Same as target_unit, but with unit system (e.g. SI). Only one of target_unit and target_unit_system can be specified. + timezone: The timezone to use when aggregating datapoints. 
For aggregates of granularity 'hour' and longer, which time zone should we align to. Align to the start of the hour, start of the day or start of the month. For time zones of type Region/Location, the aggregate duration can vary, typically due to daylight saving time. For time zones of type UTC+/-HH:MM, use increments of 15 minutes. Default: "UTC" (None) Returns: - Datapoints | DatapointsList: A DatapointsList object containing the calculated data. + A DatapointsList object containing the calculated data. Examples: diff --git a/cognite/client/_api/three_d/asset_mapping.py b/cognite/client/_api/three_d/asset_mapping.py index 4dc36de0ae..a12f01929c 100644 --- a/cognite/client/_api/three_d/asset_mapping.py +++ b/cognite/client/_api/three_d/asset_mapping.py @@ -33,15 +33,15 @@ async def list( """`List 3D node asset mappings. `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - node_id (int | None): List only asset mappings associated with this node. - asset_id (int | None): List only asset mappings associated with this asset. - intersects_bounding_box (BoundingBox3D | None): If given, only return asset mappings for assets whose bounding box intersects with the given bounding box. - limit (int | None): Maximum number of asset mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + model_id: Id of the model. + revision_id: Id of the revision. + node_id: List only asset mappings associated with this node. + asset_id: List only asset mappings associated with this asset. + intersects_bounding_box: If given, only return asset mappings for assets whose bounding box intersects with the given bounding box. + limit: Maximum number of asset mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ThreeDAssetMappingList: The list of asset mappings. + The list of asset mappings. Example: @@ -97,12 +97,12 @@ async def create( """`Create 3d node asset mappings. 
`_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - asset_mapping (ThreeDAssetMapping | ThreeDAssetMappingWrite | Sequence[ThreeDAssetMapping] | Sequence[ThreeDAssetMappingWrite]): The asset mapping(s) to create. + model_id: Id of the model. + revision_id: Id of the revision. + asset_mapping: The asset mapping(s) to create. Returns: - ThreeDAssetMapping | ThreeDAssetMappingList: The created asset mapping(s). + The created asset mapping(s). Example: @@ -130,9 +130,9 @@ async def delete( """`Delete 3d node asset mappings. `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - asset_mapping (ThreeDAssetMapping | Sequence[ThreeDAssetMapping]): The asset mapping(s) to delete. + model_id: Id of the model. + revision_id: Id of the revision. + asset_mapping: The asset mapping(s) to delete. Example: diff --git a/cognite/client/_api/three_d/files.py b/cognite/client/_api/three_d/files.py index 0c7d8a2e68..73181b41cf 100644 --- a/cognite/client/_api/three_d/files.py +++ b/cognite/client/_api/three_d/files.py @@ -11,10 +11,10 @@ async def retrieve(self, id: int) -> bytes: """`Retrieve the contents of a 3d file by id. `_ Args: - id (int): The id of the file to retrieve. + id: The id of the file to retrieve. Returns: - bytes: The contents of the file. + The contents of the file. Example: diff --git a/cognite/client/_api/three_d/models.py b/cognite/client/_api/three_d/models.py index 64fc3f4a1c..80baf94d92 100644 --- a/cognite/client/_api/three_d/models.py +++ b/cognite/client/_api/three_d/models.py @@ -37,12 +37,12 @@ async def __call__( Fetches 3d models as they are iterated over, so you keep a limited number of 3d models in memory. Args: - chunk_size (int | None): Number of 3d models to return in each chunk. Defaults to yielding one model a time. - published (bool | None): Filter based on whether or not the model has published revisions. - limit (int | None): Maximum number of 3d models to return. 
Defaults to return all items. + chunk_size: Number of 3d models to return in each chunk. Defaults to yielding one model a time. + published: Filter based on whether or not the model has published revisions. + limit: Maximum number of 3d models to return. Defaults to return all items. Yields: - ThreeDModel | ThreeDModelList: yields ThreeDModel one by one if chunk is not specified, else ThreeDModelList objects. + yields ThreeDModel one by one if chunk is not specified, else ThreeDModelList objects. """ # noqa: DOC404 async for item in self._list_generator( list_cls=ThreeDModelList, @@ -58,10 +58,10 @@ async def retrieve(self, id: int) -> ThreeDModel | None: """`Retrieve a 3d model by id `_ Args: - id (int): Get the model with this id. + id: Get the model with this id. Returns: - ThreeDModel | None: The requested 3d model. + The requested 3d model. Example: @@ -78,11 +78,11 @@ async def list(self, published: bool | None = None, limit: int | None = DEFAULT_ """`List 3d models. `_ Args: - published (bool | None): Filter based on whether or not the model has published revisions. - limit (int | None): Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. + published: Filter based on whether or not the model has published revisions. + limit: Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ThreeDModelList: The list of 3d models. + The list of 3d models. Examples: @@ -136,14 +136,12 @@ async def create( """`Create new 3d models. `_ Args: - name (str | ThreeDModelWrite | SequenceNotStr[str | ThreeDModelWrite]): The name of the 3d model(s) or 3D - model object to create. If a 3D model object is provided, the other arguments are ignored. - data_set_id (int | None): The id of the dataset this 3D model belongs to. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. 
- Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + name: The name of the 3d model(s) or 3D model object to create. If a 3D model object is provided, the other arguments are ignored. + data_set_id: The id of the dataset this 3D model belongs to. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. Returns: - ThreeDModel | ThreeDModelList: The created 3d model(s). + The created 3d model(s). Example: @@ -196,11 +194,11 @@ async def update( """`Update 3d models. `_ Args: - item (ThreeDModel | ThreeDModelUpdate | Sequence[ThreeDModel | ThreeDModelUpdate]): ThreeDModel(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ThreeDModel or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: ThreeDModel(s) to update + mode: How to update data when a non-update object is given (ThreeDModel or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - ThreeDModel | ThreeDModelList: Updated ThreeDModel(s) + Updated ThreeDModel(s) Examples: @@ -234,7 +232,7 @@ async def delete(self, id: int | Sequence[int]) -> None: """`Delete 3d models. 
`_ Args: - id (int | Sequence[int]): ID or list of IDs to delete. + id: ID or list of IDs to delete. Example: diff --git a/cognite/client/_api/three_d/revisions.py b/cognite/client/_api/three_d/revisions.py index ead00ab562..5a147124f3 100644 --- a/cognite/client/_api/three_d/revisions.py +++ b/cognite/client/_api/three_d/revisions.py @@ -36,13 +36,13 @@ async def __call__( Fetches 3d model revisions as they are iterated over, so you keep a limited number of 3d model revisions in memory. Args: - model_id (int): Iterate over revisions for the model with this id. - chunk_size (int | None): Number of 3d model revisions to return in each chunk. Defaults to yielding one model a time. - published (bool): Filter based on whether or not the revision has been published. - limit (int | None): Maximum number of 3d model revisions to return. Defaults to return all items. + model_id: Iterate over revisions for the model with this id. + chunk_size: Number of 3d model revisions to return in each chunk. Defaults to yielding one model a time. + published: Filter based on whether or not the revision has been published. + limit: Maximum number of 3d model revisions to return. Defaults to return all items. Yields: - ThreeDModelRevision | ThreeDModelRevisionList: yields ThreeDModelRevision one by one if chunk is not specified, else ThreeDModelRevisionList objects. + yields ThreeDModelRevision one by one if chunk is not specified, else ThreeDModelRevisionList objects. """ # noqa: DOC404 async for item in self._list_generator( list_cls=ThreeDModelRevisionList, @@ -59,11 +59,11 @@ async def retrieve(self, model_id: int, id: int) -> ThreeDModelRevision | None: """`Retrieve a 3d model revision by id `_ Args: - model_id (int): Get the revision under the model with this id. - id (int): Get the model revision with this id. + model_id: Get the revision under the model with this id. + id: Get the model revision with this id. Returns: - ThreeDModelRevision | None: The requested 3d model revision. 
+ The requested 3d model revision. Example: @@ -101,11 +101,11 @@ async def create( """`Create a revisions for a specified 3d model. `_ Args: - model_id (int): Create revisions for this model. - revision (ThreeDModelRevision | ThreeDModelRevisionWrite | Sequence[ThreeDModelRevision] | Sequence[ThreeDModelRevisionWrite]): The revision(s) to create. + model_id: Create revisions for this model. + revision: The revision(s) to create. Returns: - ThreeDModelRevision | ThreeDModelRevisionList: The created revision(s) + The created revision(s) Example: @@ -132,12 +132,12 @@ async def list( """`List 3d model revisions. `_ Args: - model_id (int): List revisions under the model with this id. - published (bool): Filter based on whether or not the revision is published. - limit (int | None): Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. + model_id: List revisions under the model with this id. + published: Filter based on whether or not the revision is published. + limit: Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ThreeDModelRevisionList: The list of 3d model revisions. + The list of 3d model revisions. Example: @@ -168,12 +168,12 @@ async def update( """`Update 3d model revisions. `_ Args: - model_id (int): Update the revision under the model with this id. - item (ThreeDModelRevision | ThreeDModelRevisionUpdate | Sequence[ThreeDModelRevision | ThreeDModelRevisionUpdate]): ThreeDModelRevision(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ThreeDModelRevision or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. 
Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + model_id: Update the revision under the model with this id. + item: ThreeDModelRevision(s) to update + mode: How to update data when a non-update object is given (ThreeDModelRevision or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - ThreeDModelRevision | ThreeDModelRevisionList: Updated ThreeDModelRevision(s) + Updated ThreeDModelRevision(s) Examples: @@ -205,8 +205,8 @@ async def delete(self, model_id: int, id: int | Sequence[int]) -> None: """`Delete 3d model revisions. `_ Args: - model_id (int): Delete the revision under the model with this id. - id (int | Sequence[int]): ID or list of IDs to delete. + model_id: Delete the revision under the model with this id. + id: ID or list of IDs to delete. Example: @@ -227,9 +227,9 @@ async def update_thumbnail(self, model_id: int, revision_id: int, file_id: int) """`Update a revision thumbnail. `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - file_id (int): Id of the thumbnail file in the Files API. + model_id: Id of the model. + revision_id: Id of the revision. + file_id: Id of the thumbnail file in the Files API. Example: @@ -259,16 +259,16 @@ async def list_nodes( the resulting subtree with the 'depth' query parameter. Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - node_id (int | None): ID of the root node of the subtree you request (default is the root node). 
- depth (int | None): Get sub nodes up to this many levels below the specified node. Depth 0 is the root node. - sort_by_node_id (bool): Returns the nodes in `nodeId` order. - partitions (int | None): The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`. - limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + model_id: Id of the model. + revision_id: Id of the revision. + node_id: ID of the root node of the subtree you request (default is the root node). + depth: Get sub nodes up to this many levels below the specified node. Depth 0 is the root node. + sort_by_node_id: Returns the nodes in `nodeId` order. + partitions: The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`. + limit: Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ThreeDNodeList: The list of 3d nodes. + The list of 3d nodes. Example: @@ -302,14 +302,14 @@ async def filter_nodes( """`List nodes in a revision, filtered by node property values. `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - properties (dict[str, dict[str, SequenceNotStr[str]]] | None): Properties for filtering. The object contains one or more category. Each category references one or more properties. Each property is associated with a list of values. For a node to satisfy the filter, it must, for each category/property in the filter, contain the category+property combination with a value that is contained within the corresponding list in the filter. - limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`. + model_id: Id of the model. + revision_id: Id of the revision. 
+ properties: Properties for filtering. The object contains one or more category. Each category references one or more properties. Each property is associated with a list of values. For a node to satisfy the filter, it must, for each category/property in the filter, contain the category+property combination with a value that is contained within the corresponding list in the filter. + limit: Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`. Returns: - ThreeDNodeList: The list of 3d nodes. + The list of 3d nodes. Example: @@ -337,13 +337,13 @@ async def list_ancestor_nodes( """`Retrieves a list of ancestor nodes of a given node, including itself, in the hierarchy of the 3D model `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - node_id (int | None): ID of the node to get the ancestors of. - limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + model_id: Id of the model. + revision_id: Id of the revision. + node_id: ID of the node to get the ancestors of. + limit: Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ThreeDNodeList: The list of 3d nodes. + The list of 3d nodes. Example: diff --git a/cognite/client/_api/time_series.py b/cognite/client/_api/time_series.py index cd929a3fde..93b337bf83 100644 --- a/cognite/client/_api/time_series.py +++ b/cognite/client/_api/time_series.py @@ -128,29 +128,29 @@ async def __call__( Fetches time series as they are iterated over, so you keep a limited number of objects in memory. Args: - chunk_size (int | None): Number of time series to return in each chunk. Defaults to yielding one time series a time. - name (str | None): Name of the time series. Often referred to as tag. 
- unit (str | None): Unit of the time series. - unit_external_id (str | None): Filter on unit external ID. - unit_quantity (str | None): Filter on unit quantity. - is_string (bool | None): Whether the time series is a string time series. - is_step (bool | None): Whether the time series is a step (piecewise constant) time series. - asset_ids (Sequence[int] | None): List time series related to these assets. - asset_external_ids (SequenceNotStr[str] | None): List time series related to these assets. - asset_subtree_ids (int | Sequence[int] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only time series in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only time series in the specified data set(s) with this external id / these external ids. - metadata (dict[str, Any] | None): Custom, application specific metadata. String key -> String value - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - limit (int | None): Maximum number of time series to return. Defaults to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). 
It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + chunk_size: Number of time series to return in each chunk. Defaults to yielding one time series a time. + name: Name of the time series. Often referred to as tag. + unit: Unit of the time series. + unit_external_id: Filter on unit external ID. + unit_quantity: Filter on unit quantity. + is_string: Whether the time series is a string time series. + is_step: Whether the time series is a step (piecewise constant) time series. + asset_ids: List time series related to these assets. + asset_external_ids: List time series related to these assets. + asset_subtree_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only time series in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only time series in the specified data set(s) with this external id / these external ids. + metadata: Custom, application specific metadata. String key -> String value + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. 
+ limit: Maximum number of time series to return. Defaults to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. Yields: - TimeSeries | TimeSeriesList: yields TimeSeries one by one if chunk_size is not specified, else TimeSeriesList objects. + yields TimeSeries one by one if chunk_size is not specified, else TimeSeriesList objects. """ # noqa: DOC404 asset_subtree_ids_processed = process_asset_subtree_ids(asset_subtree_ids, asset_subtree_external_ids) data_set_ids_processed = process_data_set_ids(data_set_ids, data_set_external_ids) @@ -193,12 +193,12 @@ async def retrieve( """`Retrieve a single time series by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID - instance_id (NodeId | None): Instance ID + id: ID + external_id: External ID + instance_id: Instance ID Returns: - TimeSeries | None: Requested time series or None if it does not exist. + Requested time series or None if it does not exist. Examples: @@ -230,13 +230,13 @@ async def retrieve_multiple( """`Retrieve multiple time series by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - instance_ids (Sequence[NodeId] | None): Instance IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + instance_ids: Instance IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - TimeSeriesList: The requested time series. + The requested time series. 
Examples: @@ -267,11 +267,11 @@ async def aggregate_count( """`Count of time series matching the specified filters and search. `_ Args: - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count. - filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down time series to count requiring exact match. + advanced_filter: The filter to narrow down the time series to count. + filter: The filter to narrow down time series to count requiring exact match. Returns: - int: The number of time series matching the specified filters and search. + The number of time series matching the specified filters and search. Examples: @@ -307,12 +307,12 @@ async def aggregate_cardinality_values( """`Find approximate property count for time series. `_ Args: - property (TimeSeriesProperty | str | list[str]): The property to count the cardinality of. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + property: The property to count the cardinality of. + advanced_filter: The filter to narrow down the time series to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the time series to count requiring exact match. Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -356,12 +356,12 @@ async def aggregate_cardinality_properties( """`Find approximate paths count for time series. `_ Args: - path (TimeSeriesProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. 
It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the time series to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the time series to count requiring exact match. Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -392,13 +392,13 @@ async def aggregate_unique_values( """`Get unique properties with counts for time series. `_ Args: - property (TimeSeriesProperty | str | list[str]): The property to group by. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + property: The property to group by. + advanced_filter: The filter to narrow down the time series to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the time series to count requiring exact match. Returns: - UniqueResultList: List of unique values of time series matching the specified filters and search. + List of unique values of time series matching the specified filters and search. 
Examples: @@ -450,13 +450,13 @@ async def aggregate_unique_properties( """`Get unique paths with counts for time series. `_ Args: - path (TimeSeriesProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the time series to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the time series to count requiring exact match. Returns: - UniqueResultList: List of unique values of time series matching the specified filters and search. + List of unique values of time series matching the specified filters and search. Examples: @@ -489,10 +489,10 @@ async def create( """`Create one or more time series. `_ Args: - time_series (TimeSeries | TimeSeriesWrite | Sequence[TimeSeries] | Sequence[TimeSeriesWrite]): TimeSeries or list of TimeSeries to create. + time_series: TimeSeries or list of TimeSeries to create. Returns: - TimeSeries | TimeSeriesList: The created time series. + The created time series. Examples: @@ -520,9 +520,9 @@ async def delete( """`Delete one or more time series. 
`_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: External ID or list of external ids + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -564,11 +564,11 @@ async def update( """`Update one or more time series. `_ Args: - item (TimeSeries | TimeSeriesWrite | TimeSeriesUpdate | Sequence[TimeSeries | TimeSeriesWrite | TimeSeriesUpdate]): Time series to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (TimeSeries or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Time series to update + mode: How to update data when a non-update object is given (TimeSeries or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - TimeSeries | TimeSeriesList: Updated time series. + Updated time series. Examples: @@ -629,11 +629,11 @@ async def upsert( For more details, see :ref:`appendix-upsert`. 
Args: - item (TimeSeries | TimeSeriesWrite | Sequence[TimeSeries | TimeSeriesWrite]): TimeSeries or list of TimeSeries to upsert. - mode (Literal['patch', 'replace']): Whether to patch or replace in the case the time series are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + item: TimeSeries or list of TimeSeries to upsert. + mode: Whether to patch or replace in the case the time series are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. Returns: - TimeSeries | TimeSeriesList: The upserted time series(s). + The upserted time series(s). Examples: @@ -670,14 +670,14 @@ async def search( Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. Args: - name (str | None): Prefix and fuzzy search on name. - description (str | None): Prefix and fuzzy search on description. - query (str | None): Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other' - filter (TimeSeriesFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. - limit (int): Max number of results to return. + name: Prefix and fuzzy search on name. + description: Prefix and fuzzy search on description. + query: Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other' + filter: Filter to apply. Performs exact match on these fields. + limit: Max number of results to return. Returns: - TimeSeriesList: List of requested time series. + List of requested time series. 
Examples: @@ -729,29 +729,29 @@ async def list( """`List time series `_ Args: - name (str | None): Name of the time series. Often referred to as tag. - unit (str | None): Unit of the time series. - unit_external_id (str | None): Filter on unit external ID. - unit_quantity (str | None): Filter on unit quantity. - is_string (bool | None): Whether the time series is a string time series. - is_step (bool | None): Whether the time series is a step (piecewise constant) time series. - asset_ids (Sequence[int] | None): List time series related to these assets. - asset_external_ids (SequenceNotStr[str] | None): List time series related to these assets. - asset_subtree_ids (int | Sequence[int] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only time series in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only time series in the specified data set(s) with this external id / these external ids. - metadata (dict[str, Any] | None): Custom, application specific metadata. String key -> String value - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. 
- partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). - limit (int | None): Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. - sort (SortSpec | list[SortSpec] | TimeSeriesProperty | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + name: Name of the time series. Often referred to as tag. + unit: Unit of the time series. + unit_external_id: Filter on unit external ID. + unit_quantity: Filter on unit quantity. + is_string: Whether the time series is a string time series. + is_step: Whether the time series is a step (piecewise constant) time series. + asset_ids: List time series related to these assets. + asset_external_ids: List time series related to these assets. + asset_subtree_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only time series in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only time series in the specified data set(s) with this external id / these external ids. + metadata: Custom, application specific metadata. 
String key -> String value + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + limit: Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. Returns: - TimeSeriesList: The requested time series. + The requested time series. .. note:: When using `partitions`, there are few considerations to keep in mind: diff --git a/cognite/client/_api/transformations/__init__.py b/cognite/client/_api/transformations/__init__.py index 9c98d98e70..914cf31de2 100644 --- a/cognite/client/_api/transformations/__init__.py +++ b/cognite/client/_api/transformations/__init__.py @@ -95,23 +95,23 @@ async def __call__( """Iterate over transformations Args: - chunk_size (int | None): Number of transformations to return in each chunk. Defaults to yielding one transformation a time. - include_public (bool): Whether public transformations should be included in the results. (default true). 
- name_regex (str | None): Regex expression to match the transformation name - query_regex (str | None): Regex expression to match the transformation query - destination_type (str | None): Transformation destination resource name to filter by. - conflict_mode (str | None): Filters by a selected transformation action type: abort/create, upsert, update, delete - cdf_project_name (str | None): Project name to filter by configured source and destination project - has_blocked_error (bool | None): Whether only the blocked transformations should be included in the results. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - data_set_ids (int | list[int] | None): Return only transformations in the specified data sets with these id(s). - data_set_external_ids (str | list[str] | None): Return only transformations in the specified data sets with these external id(s). - tags (TagsFilter | None): Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. - limit (int | None): Limits the number of results to be returned. Defaults to yielding all transformations. + chunk_size: Number of transformations to return in each chunk. Defaults to yielding one transformation a time. + include_public: Whether public transformations should be included in the results. (default true). + name_regex: Regex expression to match the transformation name + query_regex: Regex expression to match the transformation query + destination_type: Transformation destination resource name to filter by. + conflict_mode: Filters by a selected transformation action type: abort/create, upsert, update, delete + cdf_project_name: Project name to filter by configured source and destination project + has_blocked_error: Whether only the blocked transformations should be included in the results. 
+ created_time: Range between two timestamps + last_updated_time: Range between two timestamps + data_set_ids: Return only transformations in the specified data sets with these id(s). + data_set_external_ids: Return only transformations in the specified data sets with these external id(s). + tags: Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. + limit: Limits the number of results to be returned. Defaults to yielding all transformations. Yields: - Transformation | TransformationList: Yields transformations in chunks if chunk_size is specified, otherwise one transformation at a time. + Yields transformations in chunks if chunk_size is specified, otherwise one transformation at a time. """ # noqa: DOC404 ds_ids = IdentifierSequence.load(data_set_ids, data_set_external_ids, id_name="data_set").as_dicts() @@ -155,10 +155,10 @@ async def create( """`Create one or more transformations. `_ Args: - transformation (Transformation | TransformationWrite | Sequence[Transformation] | Sequence[TransformationWrite]): Transformation or list of transformations to create. + transformation: Transformation or list of transformations to create. Returns: - Transformation | TransformationList: Created transformation(s) + Created transformation(s) Examples: @@ -257,9 +257,9 @@ async def delete( """`Delete one or more transformations. `_ Args: - id (int | Sequence[int] | None): Id or list of ids. - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids. - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids. + external_id: External ID or list of external ids. + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Example: @@ -295,22 +295,22 @@ async def list( """`List all transformations. 
`_ Args: - include_public (bool): Whether public transformations should be included in the results. (default true). - name_regex (str | None): Regex expression to match the transformation name - query_regex (str | None): Regex expression to match the transformation query - destination_type (str | None): Transformation destination resource name to filter by. - conflict_mode (str | None): Filters by a selected transformation action type: abort/create, upsert, update, delete - cdf_project_name (str | None): Project name to filter by configured source and destination project - has_blocked_error (bool | None): Whether only the blocked transformations should be included in the results. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - data_set_ids (int | list[int] | None): Return only transformations in the specified data sets with these id(s). - data_set_external_ids (str | list[str] | None): Return only transformations in the specified data sets with these external id(s). - tags (TagsFilter | None): Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. - limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + include_public: Whether public transformations should be included in the results. (default true). + name_regex: Regex expression to match the transformation name + query_regex: Regex expression to match the transformation query + destination_type: Transformation destination resource name to filter by. + conflict_mode: Filters by a selected transformation action type: abort/create, upsert, update, delete + cdf_project_name: Project name to filter by configured source and destination project + has_blocked_error: Whether only the blocked transformations should be included in the results. 
+ created_time: Range between two timestamps + last_updated_time: Range between two timestamps + data_set_ids: Return only transformations in the specified data sets with these id(s). + data_set_external_ids: Return only transformations in the specified data sets with these external id(s). + tags: Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. + limit: Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. Returns: - TransformationList: List of transformations + List of transformations Example: @@ -350,11 +350,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None) """`Retrieve a single transformation by id. `_ Args: - id (int | None): ID - external_id (str | None): No description. + id: ID + external_id: No description. Returns: - Transformation | None: Requested transformation or None if it does not exist. + Requested transformation or None if it does not exist. Examples: @@ -385,12 +385,12 @@ async def retrieve_multiple( """`Retrieve multiple transformations. `_ Args: - ids (Sequence[int] | None): List of ids to retrieve. - external_ids (SequenceNotStr[str] | None): List of external ids to retrieve. - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: List of ids to retrieve. + external_ids: List of external ids to retrieve. + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - TransformationList: Requested transformation or None if it does not exist. + Requested transformations.
Examples: @@ -434,11 +434,11 @@ async def update( """`Update one or more transformations `_ Args: - item (Transformation | TransformationWrite | TransformationUpdate | Sequence[Transformation | TransformationWrite | TransformationUpdate]): Transformation(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Transformation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Transformation(s) to update + mode: How to update data when a non-update object is given (Transformation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Transformation | TransformationList: Updated transformation(s) + Updated transformation(s) Examples: @@ -516,13 +516,13 @@ async def run( """`Run a transformation. `_ Args: - transformation_id (int | None): Transformation internal id - transformation_external_id (str | None): Transformation external id - wait (bool): Wait until the transformation run is finished. Defaults to True. - timeout (float | None): maximum time (s) to wait, default is None (infinite time). Once the timeout is reached, it returns with the current status. Won't have any effect if wait is False. 
+ transformation_id: Transformation internal id + transformation_external_id: Transformation external id + wait: Wait until the transformation run is finished. Defaults to True. + timeout: maximum time (s) to wait, default is None (infinite time). Once the timeout is reached, it returns with the current status. Won't have any effect if wait is False. Returns: - TransformationJob: Created transformation job + Created transformation job Examples: @@ -554,8 +554,8 @@ async def cancel(self, transformation_id: int | None = None, transformation_exte """`Cancel a running transformation. `_ Args: - transformation_id (int | None): Transformation internal id - transformation_external_id (str | None): Transformation external id + transformation_id: Transformation internal id + transformation_external_id: Transformation external id Examples: @@ -586,15 +586,15 @@ async def preview( """`Preview the result of a query. `_ Args: - query (str | None): SQL query to run for preview. - convert_to_string (bool): Stringify values in the query results, default is False. - limit (int | None): Maximum number of rows to return in the final result, default is 100. - source_limit (int | None): Maximum number of items to read from the data source or None to run without limit, default is 100. - infer_schema_limit (int | None): Limit for how many rows that are used for inferring result schema, default is 10 000. - timeout (int | None): Number of seconds to wait before cancelling a query. The default, and maximum, is 240. + query: SQL query to run for preview. + convert_to_string: Stringify values in the query results, default is False. + limit: Maximum number of rows to return in the final result, default is 100. + source_limit: Maximum number of items to read from the data source or None to run without limit, default is 100. + infer_schema_limit: Limit for how many rows that are used for inferring result schema, default is 10 000. + timeout: Number of seconds to wait before cancelling a query. 
The default, and maximum, is 240. Returns: - TransformationPreviewResult: Result of the executed query + Result of the executed query Examples: diff --git a/cognite/client/_api/transformations/jobs.py b/cognite/client/_api/transformations/jobs.py index 5964aa068c..3cb9e951fe 100644 --- a/cognite/client/_api/transformations/jobs.py +++ b/cognite/client/_api/transformations/jobs.py @@ -27,12 +27,12 @@ async def list( """`List all running transformation jobs. `_ Args: - limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. - transformation_id (int | None): Filters the results by the internal transformation id. - transformation_external_id (str | None): Filters the results by the external transformation id. + limit: Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + transformation_id: Filters the results by the internal transformation id. + transformation_external_id: Filters the results by the external transformation id. Returns: - TransformationJobList: List of transformation jobs + List of transformation jobs Example: @@ -63,10 +63,10 @@ async def retrieve(self, id: int) -> TransformationJob | None: """`Retrieve a single transformation job by id. `_ Args: - id (int): Job internal Id + id: Job internal Id Returns: - TransformationJob | None: Requested transformation job or None if it does not exist. + Requested transformation job or None if it does not exist. Examples: @@ -86,10 +86,10 @@ async def list_metrics(self, id: int) -> TransformationJobMetricList: """`List the metrics of a single transformation job. `_ Args: - id (int): Job internal Id + id: Job internal Id Returns: - TransformationJobMetricList: List of updated metrics of the given job. + List of updated metrics of the given job. 
Examples: @@ -114,11 +114,11 @@ async def retrieve_multiple(self, ids: Sequence[int], ignore_unknown_ids: bool = """`Retrieve multiple transformation jobs by id. `_ Args: - ids (Sequence[int]): Job internal Ids - ignore_unknown_ids (bool): Ignore IDs that are not found rather than throw an exception. + ids: Job internal Ids + ignore_unknown_ids: Ignore IDs that are not found rather than throw an exception. Returns: - TransformationJobList: Requested transformation jobs. + Requested transformation jobs. Examples: diff --git a/cognite/client/_api/transformations/notifications.py b/cognite/client/_api/transformations/notifications.py index 9e1aa34e61..5a771eb088 100644 --- a/cognite/client/_api/transformations/notifications.py +++ b/cognite/client/_api/transformations/notifications.py @@ -52,14 +52,14 @@ async def __call__( """Iterate over transformation notifications Args: - chunk_size (int | None): Number of notifications to yield per chunk. Defaults to yielding notifications one by one. - transformation_id (int | None): Filter by transformation internal numeric ID. - transformation_external_id (str | None): Filter by transformation externalId. - destination (str | None): Filter by notification destination. - limit (int | None): Limits the number of results to be returned. Defaults to yielding all notifications. + chunk_size: Number of notifications to yield per chunk. Defaults to yielding notifications one by one. + transformation_id: Filter by transformation internal numeric ID. + transformation_external_id: Filter by transformation externalId. + destination: Filter by notification destination. + limit: Limits the number of results to be returned. Defaults to yielding all notifications. Yields: - TransformationNotification | TransformationNotificationList: Yields notifications one by one if chunk_size is None, otherwise yields lists of notifications. + Yields notifications one by one if chunk_size is None, otherwise yields lists of notifications. 
""" # noqa: DOC404 filter_ = TransformationNotificationFilter( transformation_id=transformation_id, @@ -97,10 +97,10 @@ async def create( """`Subscribe for notifications on the transformation errors. `_ Args: - notification (TransformationNotification | TransformationNotificationWrite | Sequence[TransformationNotification] | Sequence[TransformationNotificationWrite]): Notification or list of notifications to create. + notification: Notification or list of notifications to create. Returns: - TransformationNotification | TransformationNotificationList: Created notification(s) + Created notification(s) Examples: @@ -131,13 +131,13 @@ async def list( """`List notification subscriptions. `_ Args: - transformation_id (int | None): Filter by transformation internal numeric ID. - transformation_external_id (str | None): Filter by transformation externalId. - destination (str | None): Filter by notification destination. - limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + transformation_id: Filter by transformation internal numeric ID. + transformation_external_id: Filter by transformation externalId. + destination: Filter by notification destination. + limit: Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. Returns: - TransformationNotificationList: List of transformation notifications + List of transformation notifications Example: @@ -173,7 +173,7 @@ async def delete(self, id: int | Sequence[int] | None = None) -> None: """`Deletes the specified notification subscriptions on the transformation. 
Does nothing when the subscriptions already don't exist `_ Args: - id (int | Sequence[int] | None): Id or list of transformation notification ids + id: Id or list of transformation notification ids Examples: diff --git a/cognite/client/_api/transformations/schedules.py b/cognite/client/_api/transformations/schedules.py index e25a43cfe9..f4505eefd0 100644 --- a/cognite/client/_api/transformations/schedules.py +++ b/cognite/client/_api/transformations/schedules.py @@ -47,12 +47,12 @@ async def __call__( """Iterate over transformation schedules Args: - chunk_size (int | None): The number of schedules to return in each chunk. Defaults to yielding one schedule a time. - include_public (bool): Whether public transformations should be included in the results. (default true). - limit (int | None): Limits the number of results to be returned. Defaults to yielding all schedules. + chunk_size: The number of schedules to return in each chunk. Defaults to yielding one schedule a time. + include_public: Whether public transformations should be included in the results. (default true). + limit: Limits the number of results to be returned. Defaults to yielding all schedules. Yields: - TransformationSchedule | TransformationScheduleList: Yields schedules one by one if chunk_size is None, otherwise yields lists of schedules. + Yields schedules one by one if chunk_size is None, otherwise yields lists of schedules. """ # noqa: DOC404 async for item in self._list_generator( method="GET", @@ -84,10 +84,10 @@ async def create( """`Schedule the specified transformation with the specified configuration(s). `_ Args: - schedule (TransformationSchedule | TransformationScheduleWrite | Sequence[TransformationSchedule] | Sequence[TransformationScheduleWrite]): Configuration or list of configurations of the schedules to create. + schedule: Configuration or list of configurations of the schedules to create. 
Returns: - TransformationSchedule | TransformationScheduleList: Created schedule(s) + Created schedule(s) Examples: @@ -113,11 +113,11 @@ async def retrieve(self, id: int | None = None, external_id: str | None = None) """`Retrieve a single transformation schedule by the id or external id of its transformation. `_ Args: - id (int | None): transformation ID - external_id (str | None): transformation External ID + id: transformation ID + external_id: transformation External ID Returns: - TransformationSchedule | None: Requested transformation schedule or None if it does not exist. + Requested transformation schedule or None if it does not exist. Examples: @@ -146,12 +146,12 @@ async def retrieve_multiple( """`Retrieve multiple transformation schedules by the ids or external ids of the corresponding transformations. `_ Args: - ids (Sequence[int] | None): transformation IDs - external_ids (SequenceNotStr[str] | None): transformation External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: transformation IDs + external_ids: transformation External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - TransformationScheduleList: Requested transformation schedules. + Requested transformation schedules. Examples: @@ -180,11 +180,11 @@ async def list( """`List all transformation schedules. `_ Args: - include_public (bool): Whether public transformations should be included in the results. (default true). - limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + include_public: Whether public transformations should be included in the results. (default true). + limit: Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. 
Returns: - TransformationScheduleList: List of schedules + List of schedules Example: @@ -214,9 +214,9 @@ async def delete( """`Unschedule one or more transformations `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: External ID or list of external ids + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -258,11 +258,11 @@ async def update( """`Update one or more transformation schedules `_ Args: - item (TransformationSchedule | TransformationScheduleWrite | TransformationScheduleUpdate | Sequence[TransformationSchedule | TransformationScheduleWrite | TransformationScheduleUpdate]): Transformation schedule(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (TransformationSchedule or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Transformation schedule(s) to update + mode: How to update data when a non-update object is given (TransformationSchedule or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. 
For more details, see :ref:`appendix-update`. Returns: - TransformationSchedule | TransformationScheduleList: Updated transformation schedule(s) + Updated transformation schedule(s) Examples: diff --git a/cognite/client/_api/transformations/schema.py b/cognite/client/_api/transformations/schema.py index 0fce2c6456..1b09df2be2 100644 --- a/cognite/client/_api/transformations/schema.py +++ b/cognite/client/_api/transformations/schema.py @@ -18,11 +18,11 @@ async def retrieve( """`Get expected schema for a transformation destination. `_ Args: - destination (TransformationDestination): destination for which the schema is requested. - conflict_mode (str | None): conflict mode for which the schema is requested. + destination: destination for which the schema is requested. + conflict_mode: conflict mode for which the schema is requested. Returns: - TransformationSchemaColumnList: List of column descriptions + List of column descriptions Example: diff --git a/cognite/client/_api/unit_system.py b/cognite/client/_api/unit_system.py index 924219a9d5..0b552abc5a 100644 --- a/cognite/client/_api/unit_system.py +++ b/cognite/client/_api/unit_system.py @@ -11,7 +11,7 @@ async def list(self) -> UnitSystemList: """`List all supported unit systems `_ Returns: - UnitSystemList: List of unit systems + List of unit systems Examples: diff --git a/cognite/client/_api/units.py b/cognite/client/_api/units.py index c2de1ac66b..be95a4a2b6 100644 --- a/cognite/client/_api/units.py +++ b/cognite/client/_api/units.py @@ -54,11 +54,11 @@ async def retrieve( """`Retrieve one or more unit `_ Args: - external_id (str | SequenceNotStr[str]): External ID or list of external IDs - ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. + external_id: External ID or list of external IDs + ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception. 
Returns: - Unit | UnitList | None: If a single external ID is specified: the requested unit, or None if it does not exist. If several external IDs are specified: the requested units. + If a single external ID is specified: the requested unit, or None if it does not exist. If several external IDs are specified: the requested units. Examples: @@ -122,13 +122,13 @@ async def from_alias( may not be any close matches, in which case an empty UnitList is returned. Args: - alias (str): Alias of the unit, like 'cmol / L' or 'meter per second'. - quantity (str | None): Quantity of the unit, like 'Temperature' or 'Pressure'. - return_ambiguous (bool): If False (default), when the alias is ambiguous (i.e. no quantity was given), raise a ValueError. If True, return the list of all matching units. - return_closest_matches (bool): If False (default), when the lookup fails, raise a ValueError (default). If True, return the closest matching units (even if empty). + alias: Alias of the unit, like 'cmol / L' or 'meter per second'. + quantity: Quantity of the unit, like 'Temperature' or 'Pressure'. + return_ambiguous: If False (default), when the alias is ambiguous (i.e. no quantity was given), raise a ValueError. If True, return the list of all matching units. + return_closest_matches: If False (default), when the lookup fails, raise a ValueError (default). If True, return the closest matching units (even if empty). Returns: - Unit | UnitList: The unit if found, else a ValueError is raised. If one or both of ``return_ambiguous`` and ``return_closest_matches`` is passed as True, a UnitList may be returned. + The unit if found, else a ValueError is raised. If one or both of ``return_ambiguous`` and ``return_closest_matches`` is passed as True, a UnitList may be returned.
Examples: @@ -205,7 +205,7 @@ async def list(self) -> UnitList: """`List all supported units `_ Returns: - UnitList: List of units + List of units Examples: diff --git a/cognite/client/_api/user_profiles.py b/cognite/client/_api/user_profiles.py index 979c6fa5ac..0516da2ac1 100644 --- a/cognite/client/_api/user_profiles.py +++ b/cognite/client/_api/user_profiles.py @@ -36,7 +36,7 @@ async def me(self) -> UserProfile: Retrieves the user profile of the principal issuing the request, i.e. the principal *this* AsyncCogniteClient was instantiated with. Returns: - UserProfile: Your own user profile. + Your own user profile. Raises: CogniteAPIError: If this principal doesn't have a user profile, you get a not found (404) response code. @@ -66,10 +66,10 @@ async def retrieve(self, user_identifier: str | SequenceNotStr[str]) -> UserProf Retrieves one or more user profiles indexed by the user identifier in the same CDF project. Args: - user_identifier (str | SequenceNotStr[str]): The single user identifier (or sequence of) to retrieve profile(s) for. + user_identifier: The single user identifier (or sequence of) to retrieve profile(s) for. Returns: - UserProfile | UserProfileList | None: UserProfileList if a sequence of user identifier were requested, else UserProfile. If a single user identifier is requested and it is not found, None is returned. + UserProfileList if a sequence of user identifier were requested, else UserProfile. If a single user identifier is requested and it is not found, None is returned. Raises: CogniteNotFoundError: A sequences of user identifiers were requested, but one or more does not exist. @@ -99,11 +99,11 @@ async def search(self, name: str, limit: int = DEFAULT_LIMIT_READ) -> UserProfil Primarily meant for human-centric use-cases and data exploration, not for programs, as the result set ordering and match criteria threshold may change over time. Args: - name (str): Prefix search on name. - limit (int): Maximum number of results to return. 
+ name: Prefix search on name. + limit: Maximum number of results to return. Returns: - UserProfileList: User profiles search result + User profiles search result Examples: @@ -127,10 +127,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> UserProfileList: List all user profiles in the current CDF project. The results are ordered alphabetically by name. Args: - limit (int | None): Maximum number of user profiles to return. Defaults to 25. Set to -1, float("inf") or None to return all. + limit: Maximum number of user profiles to return. Defaults to 25. Set to -1, float("inf") or None to return all. Returns: - UserProfileList: List of user profiles. + List of user profiles. Examples: diff --git a/cognite/client/_api/vision.py b/cognite/client/_api/vision.py index f1f7d7c9a8..acd5721d44 100644 --- a/cognite/client/_api/vision.py +++ b/cognite/client/_api/vision.py @@ -23,10 +23,10 @@ def _process_file_ids(ids: list[int] | int | None, external_ids: list[str] | str expected by the Context API. Args: - ids (list[int] | int | None): No description. - external_ids (list[str] | str | None): No description. + ids: No description. + external_ids: No description. Returns: - list: No description.""" + No description.""" identifier_sequence = IdentifierSequence.load(ids=ids, external_ids=external_ids).as_primitives() id_objs = [{"fileId": id} for id in identifier_sequence if isinstance(id, int)] external_id_objs = [ @@ -44,12 +44,12 @@ async def extract( """`Start an asynchronous job to extract features from image files. `_ Args: - features (VisionFeature | list[VisionFeature]): The feature(s) to extract from the provided image files. - file_ids (list[int] | None): IDs of the image files to analyze. The images must already be uploaded in the same CDF project. - file_external_ids (list[str] | None): The external file ids of the image files to analyze. - parameters (FeatureParameters | None): No description. 
+ features: The feature(s) to extract from the provided image files. + file_ids: IDs of the image files to analyze. The images must already be uploaded in the same CDF project. + file_external_ids: The external file ids of the image files to analyze. + parameters: No description. Returns: - VisionExtractJob: Resulting queued job, which can be used to retrieve the status of the job or the prediction results if the job is finished. Note that .result property of this job will wait for the job to finish and returns the results. + Resulting queued job, which can be used to retrieve the status of the job or the prediction results if the job is finished. Note that .result property of this job will wait for the job to finish and returns the results. Examples: Start a job, wait for completion and then get the parsed results: @@ -94,10 +94,10 @@ async def get_extract_job(self, job_id: int) -> VisionExtractJob: """`Retrieve an existing extract job by ID. `_ Args: - job_id (int): ID of an existing feature extraction job. + job_id: ID of an existing feature extraction job. Returns: - VisionExtractJob: Vision extract job, which can be used to retrieve the status of the job or the prediction results if the job is finished. Note that .result property of this job will wait for the job to finish and returns the results. + Vision extract job, which can be used to retrieve the status of the job or the prediction results if the job is finished. Note that .result property of this job will wait for the job to finish and returns the results. Examples: Retrieve a vision extract job by ID: diff --git a/cognite/client/_api/workflows/__init__.py b/cognite/client/_api/workflows/__init__.py index 4d89b01912..9b861cf5b3 100644 --- a/cognite/client/_api/workflows/__init__.py +++ b/cognite/client/_api/workflows/__init__.py @@ -60,11 +60,11 @@ async def __call__( """Iterate over workflows Args: - chunk_size (int | None): The number of workflows to return in each chunk. 
Defaults to yielding one workflow at a time. - limit (int | None): Maximum number of workflows to return. Defaults to returning all items. + chunk_size: The number of workflows to return in each chunk. Defaults to yielding one workflow at a time. + limit: Maximum number of workflows to return. Defaults to returning all items. Yields: - Workflow | WorkflowList: Yields Workflow one by one if chunk_size is None, otherwise yields WorkflowList objects. + Yields Workflow one by one if chunk_size is None, otherwise yields WorkflowList objects. """ # noqa: DOC404 async for item in self._list_generator( method="GET", resource_cls=Workflow, list_cls=WorkflowList, limit=limit, chunk_size=chunk_size @@ -87,11 +87,11 @@ async def upsert( Note this is an upsert endpoint, so workflows that already exist will be updated, and new ones will be created. Args: - workflow (WorkflowUpsert | Sequence[WorkflowUpsert]): The workflow(s) to upsert. - mode (Literal['replace']): This is not an option for the API, but is included here to document that the upserts are always done in replace mode. + workflow: The workflow(s) to upsert. + mode: This is not an option for the API, but is included here to document that the upserts are always done in replace mode. Returns: - Workflow | WorkflowList: The created workflow(s). + The created workflow(s). Examples: @@ -133,11 +133,11 @@ async def retrieve( """`Retrieve one or more workflows. `_ Args: - external_id (str | SequenceNotStr[str]): Identifier (or sequence of identifiers) for a Workflow. Must be unique. - ignore_unknown_ids (bool): When requesting multiple workflows, whether to ignore external IDs that are not found rather than throwing an exception. + external_id: Identifier (or sequence of identifiers) for a Workflow. Must be unique. + ignore_unknown_ids: When requesting multiple workflows, whether to ignore external IDs that are not found rather than throwing an exception. 
Returns: - Workflow | WorkflowList | None: If a single external ID is specified: the requested workflow, or None if it does not exist. If several external IDs are specified: the requested workflows. + If a single external ID is specified: the requested workflow, or None if it does not exist. If several external IDs are specified: the requested workflows. Examples: @@ -178,8 +178,8 @@ async def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_id """`Delete one or more workflows with versions. `_ Args: - external_id (str | SequenceNotStr[str]): External id or list of external ids to delete. - ignore_unknown_ids (bool): Ignore external ids that are not found rather than throw an exception. + external_id: External id or list of external ids to delete. + ignore_unknown_ids: Ignore external ids that are not found rather than throw an exception. Examples: @@ -200,10 +200,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> WorkflowList: """`List workflows in the project. `_ Args: - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - WorkflowList: Workflows in the CDF project. + Workflows in the CDF project. Examples: diff --git a/cognite/client/_api/workflows/executions.py b/cognite/client/_api/workflows/executions.py index c9e7758607..79342b6493 100644 --- a/cognite/client/_api/workflows/executions.py +++ b/cognite/client/_api/workflows/executions.py @@ -29,10 +29,10 @@ async def retrieve_detailed(self, id: str) -> WorkflowExecutionDetailed | None: """`Retrieve a workflow execution with detailed information. `_ Args: - id (str): The server-generated id of the workflow execution. + id: The server-generated id of the workflow execution. Returns: - WorkflowExecutionDetailed | None: The requested workflow execution if it exists, None otherwise. + The requested workflow execution if it exists, None otherwise. Examples: @@ -69,12 +69,12 @@ async def run( """`Run a workflow execution. 
`_ Args: - workflow_external_id (str): External id of the workflow. - version (str): Version of the workflow. - input (dict | None): The input to the workflow execution. This will be available for tasks that have specified it as an input with the string "${workflow.input}" See tip below for more information. - metadata (dict | None): Application specific metadata. Keys have a maximum length of 32 characters, values a maximum of 255, and there can be a maximum of 10 key-value pairs. - client_credentials (ClientCredentials | None): Specific credentials that should be used to trigger the workflow execution. When passed will take precedence over the current credentials. - nonce (str | None): The nonce to use to bind the session. If not provided, a new session will be created using the given 'client_credentials'. If this is not given, the current credentials will be used. + workflow_external_id: External id of the workflow. + version: Version of the workflow. + input: The input to the workflow execution. This will be available for tasks that have specified it as an input with the string "${workflow.input}" See tip below for more information. + metadata: Application specific metadata. Keys have a maximum length of 32 characters, values a maximum of 255, and there can be a maximum of 10 key-value pairs. + client_credentials: Specific credentials that should be used to trigger the workflow execution. When passed will take precedence over the current credentials. + nonce: The nonce to use to bind the session. If not provided, a new session will be created using the given 'client_credentials'. If this is not given, the current credentials will be used. Tip: The workflow input can be available in the workflow tasks. For example, if you have a Task with @@ -91,7 +91,7 @@ async def run( You can create a session via the Sessions API, using the client.iam.session.create() method. Returns: - WorkflowExecution: The created workflow execution. + The created workflow execution. 
Examples: @@ -140,14 +140,14 @@ async def list( """`List workflow executions in the project. `_ Args: - workflow_version_ids (WorkflowVersionIdentifier | MutableSequence[WorkflowVersionIdentifier] | None): Workflow version id or list of workflow version ids to filter on. - created_time_start (int | None): Filter out executions that was created before this time. Time is in milliseconds since epoch. - created_time_end (int | None): Filter out executions that was created after this time. Time is in milliseconds since epoch. - statuses (WorkflowStatus | MutableSequence[WorkflowStatus] | None): Workflow status or list of workflow statuses to filter on. - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + workflow_version_ids: Workflow version id or list of workflow version ids to filter on. + created_time_start: Filter out executions that were created before this time. Time is in milliseconds since epoch. + created_time_end: Filter out executions that were created after this time. Time is in milliseconds since epoch. + statuses: Workflow status or list of workflow statuses to filter on. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - WorkflowExecutionList: The requested workflow executions. + The requested workflow executions. Examples: @@ -202,12 +202,11 @@ async def cancel(self, id: str, reason: str | None) -> WorkflowExecution: other services (like transformations and functions). Args: - id (str): The server-generated id of the workflow execution. - reason (str | None): The reason for the cancellation, this will be put within the execution's `reasonForIncompletion` field. It is defaulted to 'cancelled' if not provided. - + id: The server-generated id of the workflow execution. + reason: The reason for the cancellation, this will be put within the execution's `reasonForIncompletion` field. 
It is defaulted to 'cancelled' if not provided. Returns: - WorkflowExecution: The canceled workflow execution. + The canceled workflow execution. Examples: @@ -230,11 +229,11 @@ async def retry(self, id: str, client_credentials: ClientCredentials | None = No """`Retry a workflow execution. `_ Args: - id (str): The server-generated id of the workflow execution. - client_credentials (ClientCredentials | None): Specific credentials that should be used to retry the workflow execution. When passed will take precedence over the current credentials. + id: The server-generated id of the workflow execution. + client_credentials: Specific credentials that should be used to retry the workflow execution. When passed will take precedence over the current credentials. Returns: - WorkflowExecution: The retried workflow execution. + The retried workflow execution. Examples: Retry a workflow execution that has been cancelled or failed: diff --git a/cognite/client/_api/workflows/tasks.py b/cognite/client/_api/workflows/tasks.py index ed187fc762..609e2f4189 100644 --- a/cognite/client/_api/workflows/tasks.py +++ b/cognite/client/_api/workflows/tasks.py @@ -20,12 +20,12 @@ async def update( For tasks that has been marked with 'is_async = True', the status must be updated by calling this endpoint with either 'completed', 'failed' or 'failed_with_terminal_error'. Args: - task_id (str): The server-generated id of the task. - status (Literal['completed', 'failed', 'failed_with_terminal_error']): The new status of the task. Must be either 'completed', 'failed' or 'failed_with_terminal_error'. - output (dict | None): The output of the task. This will be available for tasks that has specified it as an output with the string "${.output}" + task_id: The server-generated id of the task. + status: The new status of the task. Must be either 'completed', 'failed' or 'failed_with_terminal_error'. + output: The output of the task. 
This will be available for tasks that has specified it as an output with the string "${.output}" Returns: - WorkflowTaskExecution: The updated task execution. + The updated task execution. Examples: diff --git a/cognite/client/_api/workflows/triggers.py b/cognite/client/_api/workflows/triggers.py index 1ab2bec3c8..7b9116db58 100644 --- a/cognite/client/_api/workflows/triggers.py +++ b/cognite/client/_api/workflows/triggers.py @@ -36,11 +36,11 @@ async def upsert( """`Create or update a trigger for a workflow. `_ Args: - workflow_trigger (WorkflowTriggerUpsert): The workflow trigger specification. - client_credentials (ClientCredentials | dict | None): Specific credentials that should be used to trigger the workflow execution. When passed will take precedence over the current credentials. + workflow_trigger: The workflow trigger specification. + client_credentials: Specific credentials that should be used to trigger the workflow execution. When passed will take precedence over the current credentials. Returns: - WorkflowTrigger: The created or updated workflow trigger specification. + The created or updated workflow trigger specification. Examples: @@ -102,7 +102,7 @@ async def delete(self, external_id: str | SequenceNotStr[str]) -> None: """`Delete one or more triggers for a workflow. `_ Args: - external_id (str | SequenceNotStr[str]): The external id(s) of the trigger(s) to delete. + external_id: The external id(s) of the trigger(s) to delete. Examples: @@ -126,10 +126,10 @@ async def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> WorkflowTriggerL """`List the workflow triggers. `_ Args: - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - WorkflowTriggerList: The list of triggers. + The list of triggers. 
Examples: @@ -152,11 +152,11 @@ async def list_runs(self, external_id: str, limit: int | None = DEFAULT_LIMIT_RE """`List the history of runs for a trigger. `_ Args: - external_id (str): The external id of the trigger to list runs for. - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + external_id: The external id of the trigger to list runs for. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - WorkflowTriggerRunList: The requested trigger runs. + The requested trigger runs. Examples: @@ -182,7 +182,7 @@ async def pause(self, external_id: str) -> None: This operation is idempotent - pausing an already paused trigger has no effect. Args: - external_id (str): The external id of the trigger to pause. + external_id: The external id of the trigger to pause. Examples: @@ -205,7 +205,7 @@ async def resume(self, external_id: str) -> None: This operation is idempotent - resuming an already active trigger has no effect. Args: - external_id (str): The external id of the trigger to resume. + external_id: The external id of the trigger to resume. Examples: diff --git a/cognite/client/_api/workflows/versions.py b/cognite/client/_api/workflows/versions.py index 4ea72d99d0..99200511ea 100644 --- a/cognite/client/_api/workflows/versions.py +++ b/cognite/client/_api/workflows/versions.py @@ -66,12 +66,12 @@ async def __call__( """Iterate over workflow versions Args: - chunk_size (int | None): The number of workflow versions to return in each chunk. Defaults to yielding one workflow version at a time. - workflow_version_ids (WorkflowIdentifier | MutableSequence[WorkflowIdentifier] | None): Workflow version id or list of workflow version ids to filter on. - limit (int | None): Maximum number of workflow versions to return. Defaults to returning all. + chunk_size: The number of workflow versions to return in each chunk. 
Defaults to yielding one workflow version at a time. + workflow_version_ids: Workflow version id or list of workflow version ids to filter on. + limit: Maximum number of workflow versions to return. Defaults to returning all. Yields: - WorkflowVersion | WorkflowVersionList: Yields WorkflowVersion one by one if chunk_size is None, otherwise yields WorkflowVersionList objects. + Yields WorkflowVersion one by one if chunk_size is None, otherwise yields WorkflowVersionList objects. """ # noqa: DOC404 async for item in self._list_generator( method="GET", @@ -97,11 +97,11 @@ async def upsert( Note this is an upsert endpoint, so workflow versions that already exist will be updated, and new ones will be created. Args: - version (WorkflowVersionUpsert | Sequence[WorkflowVersionUpsert]): The workflow version(s) to upsert. - mode (Literal['replace']): This is not an option for the API, but is included here to document that the upserts are always done in replace mode. + version: The workflow version(s) to upsert. + mode: This is not an option for the API, but is included here to document that the upserts are always done in replace mode. Returns: - WorkflowVersion | WorkflowVersionList: The created workflow version(s). + The created workflow version(s). Examples: @@ -153,8 +153,8 @@ async def delete( """`Delete a workflow version(s). `_ Args: - workflow_version_id (WorkflowVersionIdentifier | MutableSequence[WorkflowVersionId] | MutableSequence[tuple[str, str]]): Workflow version id or list of workflow version ids to delete. - ignore_unknown_ids (bool): Ignore external ids that are not found rather than throw an exception. + workflow_version_id: Workflow version id or list of workflow version ids to delete. + ignore_unknown_ids: Ignore external ids that are not found rather than throw an exception. Examples: @@ -195,11 +195,11 @@ async def retrieve( """`Retrieve a workflow version. 
`_ Args: - workflow_external_id (WorkflowVersionIdentifier | Sequence[WorkflowVersionIdentifier] | WorkflowIds): External id of the workflow. - ignore_unknown_ids (bool): When requesting multiple, whether to ignore external IDs that are not found rather than throwing an exception. + workflow_external_id: External id of the workflow. + ignore_unknown_ids: When requesting multiple, whether to ignore external IDs that are not found rather than throwing an exception. Returns: - WorkflowVersion | WorkflowVersionList | None: If a single identifier is specified: the requested workflow version, or None if it does not exist. If several ids are specified: the requested workflow versions. + If a single identifier is specified: the requested workflow version, or None if it does not exist. If several ids are specified: the requested workflow versions. Examples: @@ -263,11 +263,11 @@ async def list( """`List workflow versions in the project `_ Args: - workflow_version_ids (WorkflowIdentifier | MutableSequence[WorkflowIdentifier] | None): Workflow version id or list of workflow version ids to filter on. - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None + workflow_version_ids: Workflow version id or list of workflow version ids to filter on. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - WorkflowVersionList: The requested workflow versions. + The requested workflow versions. Examples: diff --git a/cognite/client/_basic_api_client.py b/cognite/client/_basic_api_client.py index 33fc0a9d2e..111f38fefb 100644 --- a/cognite/client/_basic_api_client.py +++ b/cognite/client/_basic_api_client.py @@ -240,17 +240,17 @@ async def _request( Requires the caller to handle errors coming from non-2xx response status codes. Args: - method (Literal['GET', 'PUT', 'HEAD']): HTTP method. - full_url (str): Full URL to make the request to. - content (bytes | AsyncIterator[bytes] | None): Optional body content to send along with the request. 
- headers (dict[str, Any] | None): Optional headers to include in the request. - timeout (float | None): Override the default timeout for this request. - include_cdf_headers (bool): Whether to include Cognite Data Fusion headers in the request. Defaults to False. - api_subversion (str | None): When include_cdf_headers=True, override the API subversion to use for the request. Has no effect otherwise. - semaphore (asyncio.BoundedSemaphore | None): Semaphore to limit concurrent requests. Pass None for no limit. + method: HTTP method. + full_url: Full URL to make the request to. + content: Optional body content to send along with the request. + headers: Optional headers to include in the request. + timeout: Override the default timeout for this request. + include_cdf_headers: Whether to include Cognite Data Fusion headers in the request. Defaults to False. + api_subversion: When include_cdf_headers=True, override the API subversion to use for the request. Has no effect otherwise. + semaphore: Semaphore to limit concurrent requests. Pass None for no limit. Returns: - CogniteHTTPResponse: The response from the server. + The response from the server. Raises: CogniteHTTPStatusError: If the response status code is 4xx or 5xx. diff --git a/cognite/client/_cognite_client.py b/cognite/client/_cognite_client.py index 2626ba161b..8114994648 100644 --- a/cognite/client/_cognite_client.py +++ b/cognite/client/_cognite_client.py @@ -47,7 +47,7 @@ class AsyncCogniteClient: For the synchronous client, see :class:`~cognite.client._cognite_client.CogniteClient`. Args: - config (ClientConfig | None): The configuration for this client. + config: The configuration for this client. """ _API_VERSION = "v1" @@ -125,7 +125,7 @@ def version(self) -> str: """Returns the current SDK version. 
Returns: - str: The current SDK version + The current SDK version """ from cognite.client import __version__ @@ -136,7 +136,7 @@ def config(self) -> ClientConfig: """Returns a config object containing the configuration for the current client. Returns: - ClientConfig: The configuration object. + The configuration object. """ return self._config @@ -156,13 +156,13 @@ def default( * Base URL: "https://{cdf_cluster}.cognitedata.com/ Args: - project (str): The CDF project. - cdf_cluster (str): The CDF cluster where the CDF project is located. - credentials (CredentialProvider): Credentials. e.g. Token, ClientCredentials. - client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + project: The CDF project. + cdf_cluster: The CDF cluster where the CDF project is located. + credentials: Credentials. e.g. Token, ClientCredentials. + client_name: A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. Returns: - AsyncCogniteClient: An AsyncCogniteClient instance with default configurations. + An AsyncCogniteClient instance with default configurations. """ return cls(ClientConfig.default(project, cdf_cluster, credentials, client_name=client_name)) @@ -186,15 +186,15 @@ def default_oauth_client_credentials( * Scopes: [f"https://{cdf_cluster}.cognitedata.com/.default"] Args: - project (str): The CDF project. - cdf_cluster (str): The CDF cluster where the CDF project is located. - tenant_id (str): The Azure tenant ID. - client_id (str): The Azure client ID. - client_secret (str): The Azure client secret. - client_name (str | None): A user-defined name for the client. 
Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + project: The CDF project. + cdf_cluster: The CDF cluster where the CDF project is located. + tenant_id: The Azure tenant ID. + client_id: The Azure client ID. + client_secret: The Azure client secret. + client_name: A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. Returns: - AsyncCogniteClient: An AsyncCogniteClient instance with default configurations. + An AsyncCogniteClient instance with default configurations. """ credentials = OAuthClientCredentials.default_for_entra_id(tenant_id, client_id, client_secret, cdf_cluster) return cls.default(project, cdf_cluster, credentials, client_name) @@ -218,14 +218,14 @@ def default_oauth_interactive( * Scopes: [f"https://{cdf_cluster}.cognitedata.com/.default"] Args: - project (str): The CDF project. - cdf_cluster (str): The CDF cluster where the CDF project is located. - tenant_id (str): The Azure tenant ID. - client_id (str): The Azure client ID. - client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + project: The CDF project. + cdf_cluster: The CDF cluster where the CDF project is located. + tenant_id: The Azure tenant ID. + client_id: The Azure client ID. + client_name: A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. 
Returns: - AsyncCogniteClient: An AsyncCogniteClient instance with default configurations. + An AsyncCogniteClient instance with default configurations. """ credentials = OAuthInteractive.default_for_entra_id(tenant_id, client_id, cdf_cluster) return cls.default(project, cdf_cluster, credentials, client_name) @@ -235,10 +235,10 @@ def load(cls, config: dict[str, Any] | str) -> AsyncCogniteClient: """Load a cognite client object from a YAML/JSON string or dict. Args: - config (dict[str, Any] | str): A dictionary or YAML/JSON string containing configuration values defined in the AsyncCogniteClient class. + config: A dictionary or YAML/JSON string containing configuration values defined in the AsyncCogniteClient class. Returns: - AsyncCogniteClient: A cognite client object. + A cognite client object. Examples: diff --git a/cognite/client/_sync_api/agents/agents.py b/cognite/client/_sync_api/agents/agents.py index 5a49580ba4..a8db6a6592 100644 --- a/cognite/client/_sync_api/agents/agents.py +++ b/cognite/client/_sync_api/agents/agents.py @@ -1,6 +1,6 @@ """ =============================================================================== -43a5b979901a5b2c3b5c6fdb3bd8bb67 +ae88276485e6ef32e03131d8ca280214 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -38,10 +38,10 @@ def upsert(self, agents: AgentUpsert | Sequence[AgentUpsert]) -> Agent | AgentLi `Create or update (upsert) one or more agents. `_ Args: - agents (AgentUpsert | Sequence[AgentUpsert]): Agent or list of agents to create or update. + agents: Agent or list of agents to create or update. Returns: - Agent | AgentList: The created or updated agent(s). + The created or updated agent(s). Examples: @@ -169,11 +169,11 @@ def retrieve( `Retrieve one or more agents by external ID. `_ Args: - external_ids (str | SequenceNotStr[str]): The external id of the agent(s) to retrieve. 
- ignore_unknown_ids (bool): Whether to ignore unknown IDs. Defaults to False. + external_ids: The external id of the agent(s) to retrieve. + ignore_unknown_ids: Whether to ignore unknown IDs. Defaults to False. Returns: - Agent | AgentList | None: The requested agent or agent list. `None` is returned if `ignore_unknown_ids` is `True` and the external ID is not found. + The requested agent or agent list. `None` is returned if `ignore_unknown_ids` is `True` and the external ID is not found. Examples: @@ -197,8 +197,8 @@ def delete(self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bo `Delete one or more agents. `_ Args: - external_ids (str | SequenceNotStr[str]): External ID of the agent or a list of external ids. - ignore_unknown_ids (bool): If `True`, the call will ignore unknown external IDs. Defaults to False. + external_ids: External ID of the agent or a list of external ids. + ignore_unknown_ids: If `True`, the call will ignore unknown external IDs. Defaults to False. Examples: @@ -218,7 +218,7 @@ def list(self) -> AgentList: `List agents. `_ Returns: - AgentList: The list of agents. + The list of agents. Examples: @@ -245,14 +245,13 @@ def chat( Users can ensure conversation continuity by including the cursor from the previous response in subsequent requests. Args: - agent_external_id (str): External ID that uniquely identifies the agent. - messages (Message | ActionResult | Sequence[Message | ActionResult]): A list of one or many input messages to the agent. Can include regular messages and action results. - cursor (str | None): The cursor to use for continuation of a conversation. Use this to - create multi-turn conversations, as the cursor will keep track of the conversation state. - actions (Sequence[Action] | None): A list of client-side actions that can be called by the agent. + agent_external_id: External ID that uniquely identifies the agent. + messages: A list of one or many input messages to the agent. 
Can include regular messages and action results. + cursor: The cursor to use for continuation of a conversation. Use this to create multi-turn conversations, as the cursor will keep track of the conversation state. + actions: A list of client-side actions that can be called by the agent. Returns: - AgentChatResponse: The response from the agent. + The response from the agent. Examples: diff --git a/cognite/client/_sync_api/ai/tools/documents.py b/cognite/client/_sync_api/ai/tools/documents.py index 1f6aaba9d3..9353c4eb0b 100644 --- a/cognite/client/_sync_api/ai/tools/documents.py +++ b/cognite/client/_sync_api/ai/tools/documents.py @@ -1,6 +1,6 @@ """ =============================================================================== -254e4fe71a376be188b61c781ea0d09e +bdac54565d8e24404b4c27e28557b38c This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -34,12 +34,12 @@ def summarize( this may be extended in the future. Args: - id (int | None): The ID of the document - external_id (str | None): The external ID of the document - instance_id (NodeId | None): The instance ID of the document + id: The ID of the document + external_id: The external ID of the document + instance_id: The instance ID of the document Returns: - Summary: A summary of the document. + A summary of the document. Examples: @@ -93,16 +93,16 @@ def ask_question( Supports up to 100 documents at a time. Args: - question (str): The question. 
- id (int | Sequence[int] | None): The ID(s) of the document(s) - external_id (str | Sequence[str] | None): The external ID(s) of the document(s) - instance_id (NodeId | Sequence[NodeId] | None): The instance ID(s) of the document(s) - language (AnswerLanguage | Literal['Chinese', 'Dutch', 'English', 'French', 'German', 'Italian', 'Japanese', 'Korean', 'Latvian', 'Norwegian', 'Portuguese', 'Spanish', 'Swedish']): The desired language of the answer, defaults to English. - additional_context (str | None): Additional context that you want the LLM to take into account. - ignore_unknown_ids (bool): Whether to skip documents that do not exist or that are not fully processed, instead of throwing an error. If no valid documents are found, an error will always be raised. + question: The question. + id: The ID(s) of the document(s) + external_id: The external ID(s) of the document(s) + instance_id: The instance ID(s) of the document(s) + language: The desired language of the answer, defaults to English. + additional_context: Additional context that you want the LLM to take into account. + ignore_unknown_ids: Whether to skip documents that do not exist or that are not fully processed, instead of throwing an error. If no valid documents are found, an error will always be raised. Returns: - Answer: The answer to the question in the form of a list of multiple content objects, each consisting of a chunk of text along with a set of references. + The answer to the question in the form of a list of multiple content objects, each consisting of a chunk of text along with a set of references. 
Examples: diff --git a/cognite/client/_sync_api/annotations.py b/cognite/client/_sync_api/annotations.py index 2fb122b7f2..a9dbc88fde 100644 --- a/cognite/client/_sync_api/annotations.py +++ b/cognite/client/_sync_api/annotations.py @@ -1,6 +1,6 @@ """ =============================================================================== -2cef22902bdcc712b1db3783a23ab21c +ca2227168e0715bbbb728abcc02cabc3 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -41,10 +41,10 @@ def create( `Create annotations `_ Args: - annotations (Annotation | AnnotationWrite | Sequence[Annotation | AnnotationWrite]): Annotation(s) to create + annotations: Annotation(s) to create Returns: - Annotation | AnnotationList: Created annotation(s) + Created annotation(s) """ return run_sync(self.__async_client.annotations.create(annotations=annotations)) @@ -61,10 +61,10 @@ def suggest( `Suggest annotations `_ Args: - annotations (Annotation | AnnotationWrite | Sequence[Annotation] | Sequence[AnnotationWrite]): annotation(s) to suggest. They must have status set to "suggested". + annotations: annotation(s) to suggest. They must have status set to "suggested". Returns: - Annotation | AnnotationList: suggested annotation(s) + suggested annotation(s) """ return run_sync(self.__async_client.annotations.suggest(annotations=annotations)) @@ -94,11 +94,11 @@ def update( `Update annotations `_ Args: - item (Annotation | AnnotationWrite | AnnotationUpdate | Sequence[Annotation | AnnotationWrite | AnnotationUpdate]): Annotation or list of annotations to update (or patch or list of patches to apply) - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Annotation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). 
Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Annotation or list of annotations to update (or patch or list of patches to apply) + mode: How to update data when a non-update object is given (Annotation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Annotation | AnnotationList: No description. + No description. """ return run_sync(self.__async_client.annotations.update(item=item, mode=mode)) @@ -107,7 +107,7 @@ def delete(self, id: int | Sequence[int]) -> None: `Delete annotations `_ Args: - id (int | Sequence[int]): ID or list of IDs to be deleted + id: ID or list of IDs to be deleted """ return run_sync(self.__async_client.annotations.delete(id=id)) @@ -116,10 +116,10 @@ def retrieve_multiple(self, ids: Sequence[int]) -> AnnotationList: `Retrieve annotations by IDs `_` Args: - ids (Sequence[int]): list of IDs to be retrieved + ids: list of IDs to be retrieved Returns: - AnnotationList: list of annotations + list of annotations """ return run_sync(self.__async_client.annotations.retrieve_multiple(ids=ids)) @@ -128,10 +128,10 @@ def retrieve(self, id: int) -> Annotation | None: `Retrieve an annotation by id `_ Args: - id (int): id of the annotation to be retrieved + id: id of the annotation to be retrieved Returns: - Annotation | None: annotation requested + annotation requested """ return run_sync(self.__async_client.annotations.retrieve(id=id)) @@ -140,11 
+140,11 @@ def reverse_lookup(self, filter: AnnotationReverseLookupFilter, limit: int | Non Reverse lookup annotated resources based on having annotations matching the filter. Args: - filter (AnnotationReverseLookupFilter): Filter to apply - limit (int | None): Maximum number of results to return. Defaults to None (all). + filter: Filter to apply + limit: Maximum number of results to return. Defaults to None (all). Returns: - ResourceReferenceList: List of resource references + List of resource references Examples: @@ -167,11 +167,11 @@ def list(self, filter: AnnotationFilter | dict, limit: int | None = DEFAULT_LIMI Passing a filter with both 'annotated_resource_type' and 'annotated_resource_ids' is always required. Args: - filter (AnnotationFilter | dict): Return annotations with parameter values that match what is specified. - limit (int | None): Maximum number of annotations to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + filter: Return annotations with parameter values that match what is specified. + limit: Maximum number of annotations to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - AnnotationList: list of annotations + list of annotations Example: diff --git a/cognite/client/_sync_api/assets.py b/cognite/client/_sync_api/assets.py index 3b784ae97e..57f6084498 100644 --- a/cognite/client/_sync_api/assets.py +++ b/cognite/client/_sync_api/assets.py @@ -1,6 +1,6 @@ """ =============================================================================== -8c673a83cc32133198ae7e6d190c587a +27d84df9580f0ca1a1b5c692145898db This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -119,29 +119,29 @@ def __call__( Fetches assets as they are iterated over, so you keep a limited number of assets in memory. Args: - chunk_size (int | None): Number of assets to return in each chunk. 
Defaults to yielding one asset a time. - name (str | None): Name of asset. Often referred to as tag. - parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets. - parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets. - asset_subtree_ids (int | Sequence[int] | None): Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value - data_set_ids (int | Sequence[int] | None): Return only assets in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only assets in the specified data set(s) with this external id / these external ids. - labels (LabelFilter | None): Return only the assets matching the specified label. - geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. - source (str | None): The source of this asset - created_time (TimestampRange | dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (TimestampRange | dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - root (bool | None): filtered assets are root assets or not - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - aggregated_properties (Sequence[AggregateAssetProperty] | None): Set of aggregated properties to include. 
Options are childCount, path, depth. - limit (int | None): Maximum number of assets to return. Defaults to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + chunk_size: Number of assets to return in each chunk. Defaults to yielding one asset a time. + name: Name of asset. Often referred to as tag. + parent_ids: Return only the direct descendants of the specified assets. + parent_external_ids: Return only the direct descendants of the specified assets. + asset_subtree_ids: Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + metadata: Custom, application specific metadata. String key -> String value + data_set_ids: Return only assets in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only assets in the specified data set(s) with this external id / these external ids. + labels: Return only the assets matching the specified label. + geo_location: Only include files matching the specified geographic relation. + source: The source of this asset + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. 
+ root: filtered assets are root assets or not + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + aggregated_properties: Set of aggregated properties to include. Options are childCount, path, depth. + limit: Maximum number of assets to return. Defaults to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. Yields: - Asset | AssetList: yields Asset one by one if chunk_size is not specified, else AssetList objects. + yields Asset one by one if chunk_size is not specified, else AssetList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.assets( @@ -173,11 +173,11 @@ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Ass `Retrieve a single asset by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - Asset | None: Requested asset or None if it does not exist. + Requested asset or None if it does not exist. Examples: @@ -204,12 +204,12 @@ def retrieve_multiple( `Retrieve multiple assets by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - AssetList: The requested assets. + The requested assets. Examples: @@ -240,12 +240,12 @@ def aggregate_count( `Count of assets matching the specified filters. 
`_ Args: - property (AssetPropertyLike | None): If specified, get an approximate number of asset with a specific property (property is not null) and matching the filters. - advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down the assets to count. - filter (AssetFilter | dict[str, Any] | None): The filter to narrow down the assets to count (strict matching). + property: If specified, get an approximate number of asset with a specific property (property is not null) and matching the filters. + advanced_filter: The advanced filter to narrow down the assets to count. + filter: The filter to narrow down the assets to count (strict matching). Returns: - int: The number of assets matching the specified filters. + The number of assets matching the specified filters. Examples: @@ -280,12 +280,12 @@ def aggregate_cardinality_values( `Find approximate property count for assets. `_ Args: - property (AssetPropertyLike): The property to count the cardinality of. - advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + property: The property to count the cardinality of. + advanced_filter: The advanced filter to narrow down assets. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down assets (strict matching). Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -323,13 +323,12 @@ def aggregate_cardinality_properties( `Find approximate paths count for assets. `_ Args: - path (AssetPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. 
- It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The advanced filter to narrow down assets. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down assets (strict matching). Returns: - int: The number of properties matching the specified filters. + The number of properties matching the specified filters. Examples: @@ -361,13 +360,13 @@ def aggregate_unique_values( In the case of text fields, the values are aggregated in a case-insensitive manner. Args: - property (AssetPropertyLike): The property to group by. - advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + property: The property to group by. + advanced_filter: The advanced filter to narrow down assets. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down assets (strict matching). Returns: - UniqueResultList: List of unique values of assets matching the specified filters and search. + List of unique values of assets matching the specified filters and search. Examples: @@ -421,14 +420,13 @@ def aggregate_unique_properties( In the case of text fields, the values are aggregated in a case-insensitive manner. 
Args: - path (AssetPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. - It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The advanced filter to narrow down assets. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (AssetFilter | dict[str, Any] | None): The filter to narrow down assets (strict matching). + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The advanced filter to narrow down assets. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down assets (strict matching). Returns: - UniqueResultList: List of unique values of assets matching the specified filters and search. + List of unique values of assets matching the specified filters and search. Examples: @@ -460,10 +458,10 @@ def create(self, asset: Asset | AssetWrite | Sequence[Asset] | Sequence[AssetWri When specifying parent-child relation between assets using `parentExternalId` the link will be resvoled into an internal ID and stored as `parentId`. Args: - asset (Asset | AssetWrite | Sequence[Asset] | Sequence[AssetWrite]): Asset or list of assets to create. + asset: Asset or list of assets to create. Returns: - Asset | AssetList: Created asset(s) + Created asset(s) Examples: @@ -500,12 +498,12 @@ def create_hierarchy( assets, so you may pass zero, one or many (same goes for the non-root assets). Args: - assets (Sequence[AssetWrite] | AssetHierarchy): List of assets to create or an instance of AssetHierarchy. - upsert (bool): If used, already existing assets will be updated instead of an exception being raised. You may control how updates are applied with the 'upsert_mode' argument. 
- upsert_mode (Literal['patch', 'replace']): Only applicable with upsert. Pass 'patch' to only update fields with non-null values (default), or 'replace' to do full updates (unset fields become null or empty). + assets: List of assets to create or an instance of AssetHierarchy. + upsert: If used, already existing assets will be updated instead of an exception being raised. You may control how updates are applied with the 'upsert_mode' argument. + upsert_mode: Only applicable with upsert. Pass 'patch' to only update fields with non-null values (default), or 'replace' to do full updates (unset fields become null or empty). Returns: - AssetList: Created (and possibly updated) asset hierarchy + Created (and possibly updated) asset hierarchy Prior to insertion, this function will run validation on the given assets and raise an error if any of the following issues are found: @@ -624,10 +622,10 @@ def delete( `Delete one or more assets `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids - recursive (bool): Recursively delete whole asset subtrees under given ids. Defaults to False. - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: External ID or list of external ids + recursive: Recursively delete whole asset subtrees under given ids. Defaults to False. + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -668,10 +666,10 @@ def update( Labels can be added, removed or replaced (set). Note that set operation deletes all the existing labels and adds the new specified labels. Args: - item (Asset | AssetWrite | AssetUpdate | Sequence[Asset | AssetWrite | AssetUpdate]): Asset(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Asset or -Write). 
If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Asset(s) to update + mode: How to update data when a non-update object is given (Asset or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Asset | AssetList: Updated asset(s) + Updated asset(s) Examples: Perform a partial update on an asset, updating the description and adding a new field to metadata: @@ -733,11 +731,11 @@ def upsert( For more details, see :ref:`appendix-upsert`. Args: - item (Asset | AssetWrite | Sequence[Asset | AssetWrite]): Asset or list of assets to upsert. - mode (Literal['patch', 'replace']): Whether to patch or replace in the case the assets are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + item: Asset or list of assets to upsert. + mode: Whether to patch or replace in the case the assets are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. Returns: - Asset | AssetList: The upserted asset(s). + The upserted asset(s). 
Examples: @@ -767,14 +765,14 @@ def search( Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. Args: - name (str | None): Fuzzy match on name. - description (str | None): Fuzzy match on description. - query (str | None): Whitespace-separated terms to search for in assets. Does a best-effort fuzzy search in relevant fields (currently name and description) for variations of any of the search terms, and orders results by relevance. - filter (AssetFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. - limit (int): Maximum number of results to return. + name: Fuzzy match on name. + description: Fuzzy match on description. + query: Whitespace-separated terms to search for in assets. Does a best-effort fuzzy search in relevant fields (currently name and description) for variations of any of the search terms, and orders results by relevance. + filter: Filter to apply. Performs exact match on these fields. + limit: Maximum number of results to return. Returns: - AssetList: List of requested assets + List of requested assets Examples: @@ -815,12 +813,12 @@ def retrieve_subtree( Retrieve the subtree for this asset up to a specified depth. Args: - id (int | None): Id of the root asset in the subtree. - external_id (str | None): External id of the root asset in the subtree. - depth (int | None): Retrieve assets up to this depth below the root asset in the subtree. Omit to get the entire subtree. + id: Id of the root asset in the subtree. + external_id: External id of the root asset in the subtree. + depth: Retrieve assets up to this depth below the root asset in the subtree. Omit to get the entire subtree. Returns: - AssetList: The requested assets or empty AssetList if asset does not exist. + The requested assets or empty AssetList if asset does not exist. 
""" return run_sync(self.__async_client.assets.retrieve_subtree(id=id, external_id=external_id, depth=depth)) @@ -851,29 +849,29 @@ def list( `List assets `_ Args: - name (str | None): Name of asset. Often referred to as tag. - parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets. - parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets. - asset_subtree_ids (int | Sequence[int] | None): Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only assets in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only assets in the specified data set(s) with this external id / these external ids. - labels (LabelFilter | None): Return only the assets matching the specified label filter. - geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. - source (str | None): The source of this asset. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - root (bool | None): filtered assets are root assets or not. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. 
- aggregated_properties (Sequence[AggregateAssetProperty] | None): Set of aggregated properties to include. Options are childCount, path, depth. - partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). - limit (int | None): Maximum number of assets to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + name: Name of asset. Often referred to as tag. + parent_ids: Return only the direct descendants of the specified assets. + parent_external_ids: Return only the direct descendants of the specified assets. + asset_subtree_ids: Only include assets in subtrees rooted at any of the specified assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include assets in subtrees rooted at any of the specified assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only assets in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only assets in the specified data set(s) with this external id / these external ids. + labels: Return only the assets matching the specified label filter. + geo_location: Only include files matching the specified geographic relation. + metadata: Custom, application specific metadata. String key -> String value. + source: The source of this asset. 
+ created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + root: filtered assets are root assets or not. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + aggregated_properties: Set of aggregated properties to include. Options are childCount, path, depth. + partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + limit: Maximum number of assets to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. Returns: - AssetList: List of requested assets + List of requested assets .. note:: When using `partitions`, there are few considerations to keep in mind: diff --git a/cognite/client/_sync_api/data_modeling/containers.py b/cognite/client/_sync_api/data_modeling/containers.py index 65d7c36876..72b7d51c92 100644 --- a/cognite/client/_sync_api/data_modeling/containers.py +++ b/cognite/client/_sync_api/data_modeling/containers.py @@ -1,6 +1,6 @@ """ =============================================================================== -d57abc9833694355385bcbacb104127c +e6697f9b9905ef77a9421ed7ae863c32 This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -59,13 +59,13 @@ def __call__( Fetches containers as they are iterated over, so you keep a limited number of containers in memory. Args: - chunk_size (int | None): Number of containers to return in each chunk. Defaults to yielding one container a time. - space (str | None): The space to query. - include_global (bool): Whether the global containers should be returned. - limit (int | None): Maximum number of containers to return. Defaults to returning all items. + chunk_size: Number of containers to return in each chunk. Defaults to yielding one container a time. + space: The space to query. + include_global: Whether the global containers should be returned. + limit: Maximum number of containers to return. Defaults to returning all items. Yields: - Container | ContainerList: yields Container one by one if chunk_size is not specified, else ContainerList objects. + yields Container one by one if chunk_size is not specified, else ContainerList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.data_modeling.containers( @@ -84,10 +84,10 @@ def retrieve(self, ids: ContainerIdentifier | Sequence[ContainerIdentifier]) -> `Retrieve one or more container by id(s). `_ Args: - ids (ContainerIdentifier | Sequence[ContainerIdentifier]): Identifier for container(s). + ids: Identifier for container(s). Returns: - Container | ContainerList | None: Requested container or None if it does not exist. + Requested container or None if it does not exist. Examples: @@ -109,9 +109,9 @@ def delete(self, ids: ContainerIdentifier | Sequence[ContainerIdentifier]) -> li `Delete one or more containers `_ Args: - ids (ContainerIdentifier | Sequence[ContainerIdentifier]): The container identifier(s). + ids: The container identifier(s). Returns: - list[ContainerId]: The container(s) which has been deleted. Empty list if nothing was deleted. + The container(s) which has been deleted. 
Empty list if nothing was deleted. Examples: Delete containers by id: @@ -128,9 +128,9 @@ def delete_constraints(self, ids: Sequence[ConstraintIdentifier]) -> list[Constr `Delete one or more constraints `_ Args: - ids (Sequence[ConstraintIdentifier]): The constraint identifier(s). + ids: The constraint identifier(s). Returns: - list[ConstraintIdentifier]: The constraints(s) which have been deleted. + The constraint(s) which have been deleted. Examples: Delete constraints by id: @@ -149,9 +149,9 @@ def delete_indexes(self, ids: Sequence[IndexIdentifier]) -> list[IndexIdentifier `Delete one or more indexes `_ Args: - ids (Sequence[IndexIdentifier]): The index identifier(s). + ids: The index identifier(s). Returns: - list[IndexIdentifier]: The indexes(s) which has been deleted. + The index(es) which have been deleted. Examples: Delete indexes by id: @@ -175,12 +175,12 @@ def list( `List containers `_ Args: - space (str | None): The space to query - limit (int | None): Maximum number of containers to return. Defaults to 10. Set to -1, float("inf") or None to return all items. - include_global (bool): Whether the global containers should be returned. + space: The space to query + limit: Maximum number of containers to return. Defaults to 10. Set to -1, float("inf") or None to return all items. + include_global: Whether the global containers should be returned. Returns: - ContainerList: List of requested containers + List of requested containers Examples: @@ -216,10 +216,10 @@ def apply(self, container: ContainerApply | Sequence[ContainerApply]) -> Contain `Add or update (upsert) containers. `_ Args: - container (ContainerApply | Sequence[ContainerApply]): Container(s) to create or update. + container: Container(s) to create or update.
Returns: - Container | ContainerList: Created container(s) + Created container(s) Examples: diff --git a/cognite/client/_sync_api/data_modeling/data_models.py b/cognite/client/_sync_api/data_modeling/data_models.py index 0b754b7999..93fdaf89b6 100644 --- a/cognite/client/_sync_api/data_modeling/data_models.py +++ b/cognite/client/_sync_api/data_modeling/data_models.py @@ -1,6 +1,6 @@ """ =============================================================================== -4084f19e9d60803a93ffa88c0ac39ad0 +52353e1e28eda5b7a29b93e791af60fa This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -69,15 +69,15 @@ def __call__( Fetches data model as they are iterated over, so you keep a limited number of data model in memory. Args: - chunk_size (int | None): Number of data model to return in each chunk. Defaults to yielding one data_model a time. - limit (int | None): Maximum number of data model to return. Defaults to returning all items. - space (str | None): The space to query. - inline_views (bool): Whether to expand the referenced views inline in the returned result. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. - include_global (bool): Whether to include global views. + chunk_size: Number of data model to return in each chunk. Defaults to yielding one data_model a time. + limit: Maximum number of data model to return. Defaults to returning all items. + space: The space to query. + inline_views: Whether to expand the referenced views inline in the returned result. + all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global: Whether to include global views. 
Yields: - DataModel | DataModelList: yields DataModel one by one if chunk_size is not specified, else DataModelList objects. + yields DataModel one by one if chunk_size is not specified, else DataModelList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.data_modeling.data_models( @@ -107,11 +107,11 @@ def retrieve( `Retrieve data_model(s) by id(s). `_ Args: - ids (DataModelIdentifier | Sequence[DataModelIdentifier]): Data Model identifier(s). - inline_views (bool): Whether to expand the referenced views inline in the returned result. + ids: Data Model identifier(s). + inline_views: Whether to expand the referenced views inline in the returned result. Returns: - DataModelList[ViewId] | DataModelList[View]: Requested data model(s) or empty if none exist. + Requested data model(s) or empty if none exist. Examples: @@ -131,9 +131,9 @@ def delete(self, ids: DataModelIdentifier | Sequence[DataModelIdentifier]) -> li `Delete one or more data model `_ Args: - ids (DataModelIdentifier | Sequence[DataModelIdentifier]): Data Model identifier(s). + ids: Data Model identifier(s). Returns: - list[DataModelId]: The data_model(s) which has been deleted. None if nothing was deleted. + The data_model(s) which has been deleted. None if nothing was deleted. Examples: Delete data model by id: @@ -177,14 +177,14 @@ def list( `List data models `_ Args: - inline_views (bool): Whether to expand the referenced views inline in the returned result. - limit (int | None): Maximum number of data model to return. Defaults to 10. Set to -1, float("inf") or None to return all items. - space (str | None): The space to query. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. - include_global (bool): Whether to include global data models. + inline_views: Whether to expand the referenced views inline in the returned result. + limit: Maximum number of data model to return. 
Defaults to 10. Set to -1, float("inf") or None to return all items. + space: The space to query. + all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global: Whether to include global data models. Returns: - DataModelList[View] | DataModelList[ViewId]: List of requested data models + List of requested data models Examples: @@ -226,10 +226,10 @@ def apply(self, data_model: DataModelApply | Sequence[DataModelApply]) -> DataMo `Create or update one or more data model. `_ Args: - data_model (DataModelApply | Sequence[DataModelApply]): Data model(s) to create or update (upsert). + data_model: Data model(s) to create or update (upsert). Returns: - DataModel | DataModelList: Created data model(s) + Created data model(s) Examples: diff --git a/cognite/client/_sync_api/data_modeling/graphql.py b/cognite/client/_sync_api/data_modeling/graphql.py index 42de32af04..9a8b101003 100644 --- a/cognite/client/_sync_api/data_modeling/graphql.py +++ b/cognite/client/_sync_api/data_modeling/graphql.py @@ -1,6 +1,6 @@ """ =============================================================================== -4c797ab98ba8663bd1d57f352855c2ef +1cddc46d83897d442813705f021107c0 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -30,10 +30,10 @@ def _unsafely_wipe_and_regenerate_dml(self, id: DataModelIdentifier) -> str: This removes all comments from the DML. Args: - id (DataModelIdentifier): The data model to apply DML to. + id: The data model to apply DML to. Returns: - str: The new DML + The new DML """ return run_sync(self.__async_client.data_modeling.graphql._unsafely_wipe_and_regenerate_dml(id=id)) @@ -49,14 +49,14 @@ def apply_dml( Apply the DML for a given data model. Args: - id (DataModelIdentifier): The data model to apply DML to. - dml (str): The DML to apply. 
- name (str | None): The name of the data model. - description (str | None): The description of the data model. - previous_version (str | None): The previous version of the data model. Specify to reuse view versions from previous data model version. + id: The data model to apply DML to. + dml: The DML to apply. + name: The name of the data model. + description: The description of the data model. + previous_version: The previous version of the data model. Specify to reuse view versions from previous data model version. Returns: - DMLApplyResult: The id of the updated data model. + The id of the updated data model. Examples: @@ -83,12 +83,12 @@ def query(self, id: DataModelIdentifier, query: str, variables: dict[str, Any] | Execute a GraphQl query against a given data model. Args: - id (DataModelIdentifier): The data model to query. - query (str): The query to issue. - variables (dict[str, Any] | None): An optional dict of variables to pass to the query. + id: The data model to query. + query: The query to issue. + variables: An optional dict of variables to pass to the query. Returns: - dict[str, Any]: The query result + The query result Examples: diff --git a/cognite/client/_sync_api/data_modeling/instances.py b/cognite/client/_sync_api/data_modeling/instances.py index cdd4cad368..6e07a7da90 100644 --- a/cognite/client/_sync_api/data_modeling/instances.py +++ b/cognite/client/_sync_api/data_modeling/instances.py @@ -1,6 +1,6 @@ """ =============================================================================== -e9ec879b211d48e1aa0e9d7c871a6f05 +faeefd4645cd7c3acfbdfc0d81b11f88 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -130,19 +130,19 @@ def __call__( Fetches instances as they are iterated over, so you keep a limited number of instances in memory. Args: - chunk_size (int | None): Number of data_models to return in each chunk. 
Defaults to yielding one instance at a time. - instance_type (Literal['node', 'edge']): Whether to query for nodes or edges. - limit (int | None): Maximum number of instances to return. Defaults to returning all items. - include_typing (bool): Whether to return property type information as part of the result. - sources (Source | Sequence[Source] | None): Views to retrieve properties from. - space (str | SequenceNotStr[str] | None): Only return instances in the given space (or list of spaces). - sort (list[InstanceSort | dict] | InstanceSort | dict | None): Sort(s) to apply to the returned instances. For nontrivial amounts of data, you need to have a backing, cursorable index. - filter (Filter | dict[str, Any] | None): Advanced filtering of instances. - debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + chunk_size: Number of data_models to return in each chunk. Defaults to yielding one instance at a time. + instance_type: Whether to query for nodes or edges. + limit: Maximum number of instances to return. Defaults to returning all items. + include_typing: Whether to return property type information as part of the result. + sources: Views to retrieve properties from. + space: Only return instances in the given space (or list of spaces). + sort: Sort(s) to apply to the returned instances. For nontrivial amounts of data, you need to have a backing, cursorable index. + filter: Advanced filtering of instances. + debug: Debug settings for profiling and troubleshooting. Yields: - Edge | EdgeList | Node | NodeList: yields Instance one by one if chunk_size is not specified, else NodeList/EdgeList objects. - """ + yields Instance one by one if chunk_size is not specified, else NodeList/EdgeList objects. 
+ """ # noqa: DOC404 yield from SyncIterator( self.__async_client.data_modeling.instances( chunk_size=chunk_size, @@ -200,13 +200,13 @@ def retrieve_edges( Args: - edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]]): Edge id(s) to retrieve. - edge_cls (type[T_Edge]): The custom edge class to use, the retrieved edges will automatically be serialized into this class. - sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom edge class. - include_typing (bool): Whether to include typing information + edges: Edge id(s) to retrieve. + edge_cls: The custom edge class to use, the retrieved edges will automatically be serialized into this class. + sources: Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom edge class. + include_typing: Whether to include typing information Returns: - EdgeList[T_Edge] | T_Edge | Edge | None: The requested edges. + The requested edges. Examples: @@ -296,13 +296,13 @@ def retrieve_nodes( built-in Node class. Args: - nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]]): Node id(s) to retrieve. - node_cls (type[T_Node]): The custom node class to use, the retrieved nodes will automatically be serialized to this class. - sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom node class. - include_typing (bool): Whether to include typing information + nodes: Node id(s) to retrieve. + node_cls: The custom node class to use, the retrieved nodes will automatically be serialized to this class. + sources: Retrieve properties from the listed - by reference - views. This only applies if you do not provide a custom node class. + include_typing: Whether to include typing information Returns: - NodeList[T_Node] | T_Node | Node | None: The requested edges. 
+ The requested edges. Examples: @@ -367,13 +367,13 @@ def retrieve( `Retrieve one or more instance by id(s). `_ Args: - nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node ids - edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge ids - sources (Source | Sequence[Source] | None): Retrieve properties from the listed - by reference - views. - include_typing (bool): Whether to return property type information as part of the result. + nodes: Node ids + edges: Edge ids + sources: Retrieve properties from the listed - by reference - views. + include_typing: Whether to return property type information as part of the result. Returns: - InstancesResult[Node, Edge]: Requested instances. + Requested instances. Examples: @@ -418,11 +418,11 @@ def delete( `Delete one or more instances `_ Args: - nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node ids - edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge ids + nodes: Node ids + edges: Edge ids Returns: - InstancesDeleteResult: The instance ID(s) that was deleted. Empty list if nothing was deleted. + The instance ID(s) that was deleted. Empty list if nothing was deleted. Examples: @@ -461,13 +461,13 @@ def inspect( This method will return the involved views and containers for the given nodes and edges. Args: - nodes (NodeId | Sequence[NodeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Node IDs. - edges (EdgeId | Sequence[EdgeId] | tuple[str, str] | Sequence[tuple[str, str]] | None): Edge IDs. - involved_views (InvolvedViews | None): Whether to include involved views. Must pass at least one of involved_views or involved_containers. - involved_containers (InvolvedContainers | None): Whether to include involved containers. Must pass at least one of involved_views or involved_containers. + nodes: Node IDs. + edges: Edge IDs. 
+ involved_views: Whether to include involved views. Must pass at least one of involved_views or involved_containers. + involved_containers: Whether to include involved containers. Must pass at least one of involved_views or involved_containers. Returns: - InstanceInspectResults: List of instance inspection results. + List of instance inspection results. Examples: @@ -513,13 +513,13 @@ def subscribe( see :ref:`this example of syncing instances to a local SQLite database `. Args: - query (QuerySync): The query to subscribe to. - callback (Callable[[QueryResult], None | Awaitable[None]]): The callback function to call when the result set changes. Can be a regular or async function. - poll_delay_seconds (float): The time to wait between polls when no data is present. Defaults to 30 seconds. - throttle_seconds (float): The time to wait between polls despite data being present. + query: The query to subscribe to. + callback: The callback function to call when the result set changes. Can be a regular or async function. + poll_delay_seconds: The time to wait between polls when no data is present. Defaults to 30 seconds. + throttle_seconds: The time to wait between polls despite data being present. Returns: - SubscriptionContext: An object that can be used to inspect and cancel the subscription. + An object that can be used to inspect and cancel the subscription. Examples: @@ -569,15 +569,16 @@ def apply( `Add or update (upsert) instances. `_ Args: - nodes (NodeApply | Sequence[NodeApply] | None): Nodes to apply - edges (EdgeApply | Sequence[EdgeApply] | None): Edges to apply - auto_create_start_nodes (bool): Whether to create missing start nodes for edges when ingesting. By default, the start node of an edge must exist before it can be ingested. - auto_create_end_nodes (bool): Whether to create missing end nodes for edges when ingesting. By default, the end node of an edge must exist before it can be ingested. 
- auto_create_direct_relations (bool): Whether to create missing direct relation targets when ingesting. - skip_on_version_conflict (bool): If existingVersion is specified on any of the nodes/edges in the input, the default behaviour is that the entire ingestion will fail when version conflicts occur. If skipOnVersionConflict is set to true, items with version conflicts will be skipped instead. If no version is specified for nodes/edges, it will do the writing directly. - replace (bool): How do we behave when a property value exists? Do we replace all matching and existing values with the supplied values (true)? Or should we merge in new values for properties together with the existing values (false)? Note: This setting applies for all nodes or edges specified in the ingestion call. + nodes: Nodes to apply + edges: Edges to apply + auto_create_start_nodes: Whether to create missing start nodes for edges when ingesting. By default, the start node of an edge must exist before it can be ingested. + auto_create_end_nodes: Whether to create missing end nodes for edges when ingesting. By default, the end node of an edge must exist before it can be ingested. + auto_create_direct_relations: Whether to create missing direct relation targets when ingesting. + skip_on_version_conflict: If existingVersion is specified on any of the nodes/edges in the input, the default behaviour is that the entire ingestion will fail when version conflicts occur. If skipOnVersionConflict is set to true, items with version conflicts will be skipped instead. If no version is specified for nodes/edges, it will do the writing directly. + replace: How do we behave when a property value exists? Do we replace all matching and existing values with the supplied values (true)? Or should we merge in new values for properties together with the existing values (false)? Note: This setting applies for all nodes or edges specified in the ingestion call. 
+ Returns: - InstancesApplyResult: Created instance(s) + Created instance(s) Examples: @@ -765,24 +766,20 @@ def search( `Search instances `_ Args: - view (ViewId): View to search in. - query (str | None): Query string that will be parsed and used for search. - instance_type (Literal['node', 'edge'] | type[T_Node] | type[T_Edge]): Whether to search for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example. - properties (list[str] | None): Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view. - target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. - space (str | SequenceNotStr[str] | None): Restrict instance search to the given space (or list of spaces). - filter (Filter | dict[str, Any] | None): Advanced filtering of instances. - include_typing (bool): Whether to include typing information. - limit (int | None): Maximum number of instances to return. Defaults to 25. Will return the maximum number - of results (1000) if set to None, -1, or math.inf. - sort (Sequence[InstanceSort | dict] | InstanceSort | dict | None): How you want the listed instances information ordered. - operator (Literal['AND', 'OR']): Controls how multiple search terms are combined when matching documents. - AND (default): A document matches only if it contains all of the query terms across the searchable fields. - This typically returns fewer results but with higher relevance. OR: A document matches if it contains any - of the query terms in the searchable fields. This typically returns more results but with lower precision. + view: View to search in. + query: Query string that will be parsed and used for search. 
+ instance_type: Whether to search for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example. + properties: Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view. + target_units: Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. + space: Restrict instance search to the given space (or list of spaces). + filter: Advanced filtering of instances. + include_typing: Whether to include typing information. + limit: Maximum number of instances to return. Defaults to 25. Will return the maximum number of results (1000) if set to None, -1, or math.inf. + sort: How you want the listed instances information ordered. + operator: Controls how multiple search terms are combined when matching documents. AND (default): A document matches only if it contains all of the query terms across the searchable fields. This typically returns fewer results but with higher relevance. OR: A document matches if it contains any of the query terms in the searchable fields. This typically returns more results but with lower precision. Returns: - NodeList[T_Node] | EdgeList[T_Edge]: Search result with matching nodes or edges. + Search result with matching nodes or edges. Examples: @@ -894,20 +891,19 @@ def aggregate( `Aggregate data across nodes/edges `_ Args: - view (ViewId): View to aggregate over. - aggregates (MetricAggregation | dict | Sequence[MetricAggregation | dict]): The properties to aggregate over. - group_by (str | SequenceNotStr[str] | None): The selection of fields to group the results by when doing aggregations. You can specify up to 5 items to group by. - instance_type (Literal['node', 'edge']): The type of instance. 
- query (str | None): Optional query string. The API will parse the query string, and use it to match the text properties on elements to use for the aggregate(s). - properties (str | SequenceNotStr[str] | None): Optional list of properties you want to apply the query to. If you do not list any properties, you search through text fields by default. - target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. - space (str | SequenceNotStr[str] | None): Restrict instance aggregate query to the given space (or list of spaces). - filter (Filter | dict[str, Any] | None): Advanced filtering of instances. - limit (int | None): Maximum number of instances to return. Defaults to 25. Will return the maximum number - of results (1000) if set to None, -1, or math.inf. + view: View to aggregate over. + aggregates: The properties to aggregate over. + group_by: The selection of fields to group the results by when doing aggregations. You can specify up to 5 items to group by. + instance_type: The type of instance. + query: Optional query string. The API will parse the query string, and use it to match the text properties on elements to use for the aggregate(s). + properties: Optional list of properties you want to apply the query to. If you do not list any properties, you search through text fields by default. + target_units: Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. + space: Restrict instance aggregate query to the given space (or list of spaces). + filter: Advanced filtering of instances. + limit: Maximum number of instances to return. Defaults to 25. Will return the maximum number of results (1000) if set to None, -1, or math.inf. 
Returns: - AggregatedNumberedValue | list[AggregatedNumberedValue] | InstanceAggregationResultList: Node or edge aggregation results. + Node or edge aggregation results. Examples: @@ -981,18 +977,18 @@ def histogram( `Produces histograms for nodes/edges `_ Args: - view (ViewId): View to to aggregate over. - histograms (Histogram | Sequence[Histogram]): The properties to aggregate over. - instance_type (Literal['node', 'edge']): Whether to search for nodes or edges. - query (str | None): Query string that will be parsed and used for search. - properties (SequenceNotStr[str] | None): Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view. - target_units (list[TargetUnit] | None): Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. - space (str | SequenceNotStr[str] | None): Restrict histogram query to instances in the given space (or list of spaces). - filter (Filter | dict[str, Any] | None): Advanced filtering of instances. - limit (int): Maximum number of instances to return. Defaults to 25. + view: View to to aggregate over. + histograms: The properties to aggregate over. + instance_type: Whether to search for nodes or edges. + query: Query string that will be parsed and used for search. + properties: Optional array of properties you want to search through. If you do not specify one or more properties, the service will search all text fields within the view. + target_units: Properties to convert to another unit. The API can only convert to another unit if a unit has been defined as part of the type on the underlying container being queried. + space: Restrict histogram query to instances in the given space (or list of spaces). + filter: Advanced filtering of instances. + limit: Maximum number of instances to return. Defaults to 25. 
Returns: - HistogramValue | list[HistogramValue]: Node or edge aggregation results. + Node or edge aggregation results. Examples: @@ -1029,12 +1025,12 @@ def query(self, query: Query, include_typing: bool = False, debug: DebugParamete recursive edge traversal, chaining of result sets, and granular property selection. Args: - query (Query): Query. - include_typing (bool): Should we return property type information as part of the result? - debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + query: Query. + include_typing: Should we return property type information as part of the result? + debug: Debug settings for profiling and troubleshooting. Returns: - QueryResult: The resulting nodes and/or edges from the query. + The resulting nodes and/or edges from the query. Examples: @@ -1104,12 +1100,12 @@ def sync(self, query: QuerySync, include_typing: bool = False, debug: DebugParam Subscribe to changes for nodes and edges in a project, matching a supplied filter. Args: - query (QuerySync): Query. - include_typing (bool): Should we return property type information as part of the result? - debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + query: Query. + include_typing: Should we return property type information as part of the result? + debug: Debug settings for profiling and troubleshooting. Returns: - QueryResult: The resulting nodes and/or edges from the query. + The resulting nodes and/or edges from the query. Examples: @@ -1223,17 +1219,17 @@ def list( `List instances `_ Args: - instance_type (Literal['node', 'edge'] | type[T_Node] | type[T_Edge]): Whether to query for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example. - include_typing (bool): Whether to return property type information as part of the result. 
- sources (Source | Sequence[Source] | None): Views to retrieve properties from. - space (str | SequenceNotStr[str] | None): Only return instances in the given space (or list of spaces). - limit (int | None): Maximum number of instances to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - sort (Sequence[InstanceSort | dict] | InstanceSort | dict | None): How you want the listed instances information ordered. - filter (Filter | dict[str, Any] | None): Advanced filtering of instances. - debug (DebugParameters | None): Debug settings for profiling and troubleshooting. + instance_type: Whether to query for nodes or edges. You can also pass a custom typed node (or edge class) inheriting from TypedNode (or TypedEdge). See apply, retrieve_nodes or retrieve_edges for an example. + include_typing: Whether to return property type information as part of the result. + sources: Views to retrieve properties from. + space: Only return instances in the given space (or list of spaces). + limit: Maximum number of instances to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + sort: How you want the listed instances information ordered. + filter: Advanced filtering of instances. + debug: Debug settings for profiling and troubleshooting. Returns: - NodeList[T_Node] | EdgeList[T_Edge]: List of requested instances + List of requested instances Examples: diff --git a/cognite/client/_sync_api/data_modeling/space_statistics.py b/cognite/client/_sync_api/data_modeling/space_statistics.py index d899b0a544..712c2dd12d 100644 --- a/cognite/client/_sync_api/data_modeling/space_statistics.py +++ b/cognite/client/_sync_api/data_modeling/space_statistics.py @@ -1,6 +1,6 @@ """ =============================================================================== -4fc58be818b3c0f31dea52d2572478db +80ea3aaf1408dd46dd0ae7a885ef098e This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -36,10 +36,10 @@ def retrieve(self, space: str | SequenceNotStr[str]) -> SpaceStatistics | SpaceS `Retrieve usage data and limits per space `_ Args: - space (str | SequenceNotStr[str]): The space or spaces to retrieve statistics for. + space: The space or spaces to retrieve statistics for. Returns: - SpaceStatistics | SpaceStatisticsList | None: The requested statistics and limits for the specified space(s). + The requested statistics and limits for the specified space(s). Examples: @@ -64,7 +64,7 @@ def list(self) -> SpaceStatisticsList: Returns statistics for data modeling resources grouped by each space in the project. Returns: - SpaceStatisticsList: The requested statistics and limits for all spaces in the project. + The requested statistics and limits for all spaces in the project. Examples: diff --git a/cognite/client/_sync_api/data_modeling/spaces.py b/cognite/client/_sync_api/data_modeling/spaces.py index ef6a9c32c3..d29fefbf1b 100644 --- a/cognite/client/_sync_api/data_modeling/spaces.py +++ b/cognite/client/_sync_api/data_modeling/spaces.py @@ -1,6 +1,6 @@ """ =============================================================================== -1d3c923b3952a8590be74f358715ca93 +a8571b42863ad0dfe06d8f428ff5b844 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -42,11 +42,11 @@ def __call__( Fetches spaces as they are iterated over, so you keep a limited number of spaces in memory. Args: - chunk_size (int | None): Number of spaces to return in each chunk. Defaults to yielding one space a time. - limit (int | None): Maximum number of spaces to return. Defaults to returning all items. + chunk_size: Number of spaces to return in each chunk. Defaults to yielding one space a time. + limit: Maximum number of spaces to return. Defaults to returning all items. 
Yields: - Space | SpaceList: yields Space one by one if chunk_size is not specified, else SpaceList objects. + yields Space one by one if chunk_size is not specified, else SpaceList objects. """ # noqa: DOC404 yield from SyncIterator(self.__async_client.data_modeling.spaces(chunk_size=chunk_size, limit=limit)) # type: ignore [misc] @@ -61,10 +61,10 @@ def retrieve(self, spaces: str | SequenceNotStr[str]) -> Space | SpaceList | Non `Retrieve one or more spaces. `_ Args: - spaces (str | SequenceNotStr[str]): Space ID + spaces: Space ID Returns: - Space | SpaceList | None: Requested space or None if it does not exist. + Requested space or None if it does not exist. Examples: @@ -84,9 +84,9 @@ def delete(self, spaces: str | SequenceNotStr[str]) -> list[str]: `Delete one or more spaces `_ Args: - spaces (str | SequenceNotStr[str]): ID or ID list ids of spaces. + spaces: ID or ID list ids of spaces. Returns: - list[str]: The space(s) which has been deleted. + The space(s) which has been deleted. Examples: Delete spaces by id: @@ -103,11 +103,11 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ, include_global: bool = Fa `List spaces `_ Args: - limit (int | None): Maximum number of spaces to return. Defaults to 10. Set to -1, float("inf") or None to return all items. - include_global (bool): Whether to include global spaces. Defaults to False. + limit: Maximum number of spaces to return. Defaults to 10. Set to -1, float("inf") or None to return all items. + include_global: Whether to include global spaces. Defaults to False. Returns: - SpaceList: List of requested spaces + List of requested spaces Examples: @@ -141,10 +141,10 @@ def apply(self, spaces: SpaceApply | Sequence[SpaceApply]) -> Space | SpaceList: `Create or patch one or more spaces. `_ Args: - spaces (SpaceApply | Sequence[SpaceApply]): Space | Sequence[Space]): Space or spaces of spacesda to create or update. + spaces: Space or spaces of spacesda to create or update. 
Returns: - Space | SpaceList: Created space(s) + Created space(s) Examples: diff --git a/cognite/client/_sync_api/data_modeling/statistics.py b/cognite/client/_sync_api/data_modeling/statistics.py index 5e7dad4f04..e620838fcf 100644 --- a/cognite/client/_sync_api/data_modeling/statistics.py +++ b/cognite/client/_sync_api/data_modeling/statistics.py @@ -1,6 +1,6 @@ """ =============================================================================== -7450cb501a5fe150b43354f5aa1448a4 +57a1fb8046c7953c621c19285433859e This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -33,7 +33,7 @@ def project(self) -> ProjectStatistics: Returns the usage data and limits for a project's data modelling usage, including data model schemas and graph instances Returns: - ProjectStatistics: The requested statistics and limits + The requested statistics and limits Examples: diff --git a/cognite/client/_sync_api/data_modeling/views.py b/cognite/client/_sync_api/data_modeling/views.py index f2fbc48c0e..3a715c2d46 100644 --- a/cognite/client/_sync_api/data_modeling/views.py +++ b/cognite/client/_sync_api/data_modeling/views.py @@ -1,6 +1,6 @@ """ =============================================================================== -0a26300de7d736aa782c880e1002e02e +ccc30965f9b4fb2373f005996eb78293 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -64,15 +64,15 @@ def __call__( Fetches views as they are iterated over, so you keep a limited number of views in memory. Args: - chunk_size (int | None): Number of views to return in each chunk. Defaults to yielding one view at a time. - limit (int | None): Maximum number of views to return. Defaults to returning all items. - space (str | None): (str | None): The space to query. 
- include_inherited_properties (bool): Whether to include properties inherited from views this view implements. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. - include_global (bool): Whether to include global views. + chunk_size: Number of views to return in each chunk. Defaults to yielding one view at a time. + limit: Maximum number of views to return. Defaults to returning all items. + space: The space to query. + include_inherited_properties: Whether to include properties inherited from views this view implements. + all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global: Whether to include global views. Yields: - View | ViewList: yields View one by one if chunk_size is not specified, else ViewList objects. + yields View one by one if chunk_size is not specified, else ViewList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.data_modeling.views( @@ -95,15 +95,12 @@ def retrieve( `Retrieve a single view by id. `_ Args: - ids (ViewIdentifier | Sequence[ViewIdentifier]): The view identifier(s). This can be given as a tuple of - strings or a ViewId object. For example, ("my_space", "my_view"), ("my_space", "my_view", "my_version"), - or ViewId("my_space", "my_view", "my_version"). Note that version is optional, if not provided, all versions - will be returned. - include_inherited_properties (bool): Whether to include properties inherited from views this view implements. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned (based on created_time) + ids: The view identifier(s). This can be given as a tuple of strings or a ViewId object. For example, ("my_space", "my_view"), ("my_space", "my_view", "my_version"), or ViewId("my_space", "my_view", "my_version"). 
Note that version is optional, if not provided, all versions will be returned. + include_inherited_properties: Whether to include properties inherited from views this view implements. + all_versions: Whether to return all versions. If false, only the newest version is returned (based on created_time) Returns: - ViewList: Requested view or None if it does not exist. + Requested view or None if it does not exist. Examples: @@ -123,9 +120,9 @@ def delete(self, ids: ViewIdentifier | Sequence[ViewIdentifier]) -> list[ViewId] `Delete one or more views `_ Args: - ids (ViewIdentifier | Sequence[ViewIdentifier]): View identifier(s) + ids: View identifier(s) Returns: - list[ViewId]: The identifier for the view(s) which has been deleted. Empty list if nothing was deleted. + The identifier for the view(s) which has been deleted. Empty list if nothing was deleted. Examples: Delete views by id: @@ -149,14 +146,14 @@ def list( `List views `_ Args: - limit (int | None): Maximum number of views to return. Defaults to 10. Set to -1, float("inf") or None to return all items. - space (str | None): (str | None): The space to query. - include_inherited_properties (bool): Whether to include properties inherited from views this view implements. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. - include_global (bool): Whether to include global views. + limit: Maximum number of views to return. Defaults to 10. Set to -1, float("inf") or None to return all items. + space: The space to query. + include_inherited_properties: Whether to include properties inherited from views this view implements. + all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global: Whether to include global views. 
Returns: - ViewList: List of requested views + List of requested views Examples: @@ -198,10 +195,10 @@ def apply(self, view: ViewApply | Sequence[ViewApply]) -> View | ViewList: `Create or update (upsert) one or more views. `_ Args: - view (ViewApply | Sequence[ViewApply]): View(s) to create or update. + view: View(s) to create or update. Returns: - View | ViewList: Created view(s) + Created view(s) Examples: diff --git a/cognite/client/_sync_api/data_sets.py b/cognite/client/_sync_api/data_sets.py index f90e1cad65..4bf777379f 100644 --- a/cognite/client/_sync_api/data_sets.py +++ b/cognite/client/_sync_api/data_sets.py @@ -1,6 +1,6 @@ """ =============================================================================== -310cad50f9c91bbd89f81c41a70f8c3c +ec95dfd737e0dcd7ad2e24564f72278c This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -67,16 +67,16 @@ def __call__( Fetches data sets as they are iterated over, so you keep a limited number of data sets in memory. Args: - chunk_size (int | None): Number of data sets to return in each chunk. Defaults to yielding one data set a time. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - write_protected (bool | None): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets. - limit (int | None): Maximum number of data sets to return. Defaults to return all items. + chunk_size: Number of data sets to return in each chunk. Defaults to yielding one data set a time. + metadata: Custom, application-specific metadata. 
String key -> String value. + created_time: Range between two timestamps. + last_updated_time: Range between two timestamps. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + write_protected: Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets. + limit: Maximum number of data sets to return. Defaults to return all items. Yields: - DataSet | DataSetList: yields DataSet one by one if chunk is not specified, else DataSetList objects. + yields DataSet one by one if chunk is not specified, else DataSetList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.data_sets( @@ -103,10 +103,10 @@ def create( `Create one or more data sets. `_ Args: - data_set (DataSet | DataSetWrite | Sequence[DataSet] | Sequence[DataSetWrite]): Union[DataSet, Sequence[DataSet]]: Data set or list of data sets to create. + data_set: Union[DataSet, Sequence[DataSet]]: Data set or list of data sets to create. Returns: - DataSet | DataSetList: Created data set(s) + Created data set(s) Examples: @@ -126,11 +126,11 @@ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Dat `Retrieve a single data set by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - DataSet | None: Requested data set or None if it does not exist. + Requested data set or None if it does not exist. Examples: @@ -157,12 +157,12 @@ def retrieve_multiple( `Retrieve multiple data sets by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - DataSetList: The requested data sets. + The requested data sets. 
Examples: @@ -188,10 +188,10 @@ def aggregate_count(self, filter: DataSetFilter | dict[str, Any] | None = None) `Aggregate data sets `_ Args: - filter (DataSetFilter | dict[str, Any] | None): Filter on data set filter with exact match + filter: Filter on data set filter with exact match Returns: - int: Count of data sets matching the filter. + Count of data sets matching the filter. Examples: @@ -229,11 +229,11 @@ def update( `Update one or more data sets `_ Args: - item (DataSet | DataSetWrite | DataSetUpdate | Sequence[DataSet | DataSetWrite | DataSetUpdate]): Data set(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DataSet or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Data set(s) to update + mode: How to update data when a non-update object is given (DataSet or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - DataSet | DataSetList: Updated data set(s) + Updated data set(s) Examples: @@ -267,15 +267,15 @@ def list( `List data sets `_ Args: - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. 
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - write_protected (bool | None): Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets. - limit (int | None): Maximum number of data sets to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + metadata: Custom, application-specific metadata. String key -> String value. + created_time: Range between two timestamps. + last_updated_time: Range between two timestamps. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + write_protected: Specify whether the filtered data sets are write-protected, or not. Set to True to only list write-protected data sets. + limit: Maximum number of data sets to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - DataSetList: List of requested data sets + List of requested data sets Examples: diff --git a/cognite/client/_sync_api/datapoints.py b/cognite/client/_sync_api/datapoints.py index 119eba0b24..1afd02bd10 100644 --- a/cognite/client/_sync_api/datapoints.py +++ b/cognite/client/_sync_api/datapoints.py @@ -1,6 +1,6 @@ """ =============================================================================== -1e36d088389fdaf7a128366d487c0c02 +69aa45134c293d355e4d00c3f7d0463d This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -101,13 +101,13 @@ def __call__( No empty chunk is ever returned. Args: - queries (DatapointsQuery | Sequence[DatapointsQuery]): Query, or queries, using id, external_id or instance_id for the time series to fetch data for, with individual settings specified. The options 'limit' and 'include_outside_points' are not supported when iterating. 
- chunk_size_datapoints (int): The number of datapoints per time series to yield per iteration. Must evenly divide 100k OR be an integer multiple of 100k. Default: 100_000. - chunk_size_time_series (int | None): The max number of time series to yield per iteration (varies as time series get exhausted, but is never empty). Default: None (all given queries are iterated at the same time). - return_arrays (bool): Whether to return the datapoints as numpy arrays. Default: True. + queries: Query, or queries, using id, external_id or instance_id for the time series to fetch data for, with individual settings specified. The options 'limit' and 'include_outside_points' are not supported when iterating. + chunk_size_datapoints: The number of datapoints per time series to yield per iteration. Must evenly divide 100k OR be an integer multiple of 100k. Default: 100_000. + chunk_size_time_series: The max number of time series to yield per iteration (varies as time series get exhausted, but is never empty). Default: None (all given queries are iterated at the same time). + return_arrays: Whether to return the datapoints as numpy arrays. Default: True. Yields: - DatapointsArray | DatapointsArrayList | Datapoints | DatapointsList: If return_arrays=True, a ``DatapointsArray`` object containing the datapoints chunk, or a ``DatapointsArrayList`` if multiple time series were asked for. When False, a ``Datapoints`` object containing the datapoints chunk, or a ``DatapointsList`` if multiple time series were asked for. + If return_arrays=True, a ``DatapointsArray`` object containing the datapoints chunk, or a ``DatapointsArrayList`` if multiple time series were asked for. When False, a ``Datapoints`` object containing the datapoints chunk, or a ``DatapointsList`` if multiple time series were asked for. Examples: @@ -427,25 +427,25 @@ def retrieve( `status codes. `_ Args: - id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, dict (with id) or (mixed) sequence of these. 
See examples below. - external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, dict (with external id) or (mixed) sequence of these. See examples below. - instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id or sequence of instance ids. - start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC. - end (int | str | datetime.datetime | None): Exclusive end. Default: "now" - aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) - granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. - timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. 
UTC+05:30 or Asia/Kolkata) may take longer to execute. - target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. - target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit. - limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit) - include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False - ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False - include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. - ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. - treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. + id: Id, dict (with id) or (mixed) sequence of these. See examples below. + external_id: External id, dict (with external id) or (mixed) sequence of these. See examples below. + instance_id: Instance id or sequence of instance ids. + start: Inclusive start. Default: 1970-01-01 UTC. + end: Exclusive end. Default: "now" + aggregates: Single aggregate or list of aggregates to retrieve. 
Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) + granularity: The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. + timezone: For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. + target_unit: The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system: The unit system of the datapoints returned. Cannot be used with target_unit. + limit: Maximum number of datapoints to return for each time series. Default: None (no limit) + include_outside_points: Whether to include outside points. Not allowed when fetching aggregates. 
Default: False + ignore_unknown_ids: Whether to ignore missing time series rather than raising an exception. Default: False + include_status: Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. + ignore_bad_datapoints: Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. + treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. Returns: - Datapoints | DatapointsList | None: A ``Datapoints`` object containing the requested data, or a ``DatapointsList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. + A ``Datapoints`` object containing the requested data, or a ``DatapointsList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. Examples: @@ -773,25 +773,25 @@ def retrieve_arrays( `status codes. `_ Args: - id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, dict (with id) or (mixed) sequence of these. See examples below. - external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, dict (with external id) or (mixed) sequence of these. See examples below. 
- instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id or sequence of instance ids. - start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC. - end (int | str | datetime.datetime | None): Exclusive end. Default: "now" - aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) - granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. - timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. - target_unit (str | None): The unit_external_id of the datapoints returned. 
If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. - target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit. - limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit) - include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False - ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False - include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. - ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. - treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. + id: Id, dict (with id) or (mixed) sequence of these. See examples below. + external_id: External id, dict (with external id) or (mixed) sequence of these. See examples below. + instance_id: Instance id or sequence of instance ids. + start: Inclusive start. Default: 1970-01-01 UTC. + end: Exclusive end. Default: "now" + aggregates: Single aggregate or list of aggregates to retrieve. 
Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) + granularity: The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. + timezone: For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, day or month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. + target_unit: The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system: The unit system of the datapoints returned. Cannot be used with target_unit. + limit: Maximum number of datapoints to return for each time series. Default: None (no limit) + include_outside_points: Whether to include outside points. Not allowed when fetching aggregates. 
Default: False + ignore_unknown_ids: Whether to ignore missing time series rather than raising an exception. Default: False + include_status: Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. + ignore_bad_datapoints: Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. + treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. Returns: - DatapointsArray | DatapointsArrayList | None: A ``DatapointsArray`` object containing the requested data, or a ``DatapointsArrayList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. + A ``DatapointsArray`` object containing the requested data, or a ``DatapointsArrayList`` if multiple time series were asked for (the ordering is ids first, then external_ids). If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. Note: For many more usage examples, check out the :py:meth:`~DatapointsAPI.retrieve` method which accepts exactly the same arguments. @@ -900,29 +900,29 @@ def retrieve_dataframe( For many more usage examples, check out the :py:meth:`~DatapointsAPI.retrieve` method which accepts exactly the same arguments. 
Args: - id (None | int | DatapointsQuery | Sequence[int | DatapointsQuery]): Id, DatapointsQuery or (mixed) sequence of these. See examples. - external_id (None | str | DatapointsQuery | SequenceNotStr[str | DatapointsQuery]): External id, DatapointsQuery or (mixed) sequence of these. See examples. - instance_id (None | NodeId | DatapointsQuery | Sequence[NodeId | DatapointsQuery]): Instance id, DatapointsQuery or (mixed) sequence of these. See examples. - start (int | str | datetime.datetime | None): Inclusive start. Default: 1970-01-01 UTC. - end (int | str | datetime.datetime | None): Exclusive end. Default: "now" - aggregates (Aggregate | str | list[Aggregate | str] | None): Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) - granularity (str | None): The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. - timezone (str | datetime.timezone | ZoneInfo | None): For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. Align to the start of the hour, -day or -month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. 
You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. - target_unit (str | None): The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. - target_unit_system (str | None): The unit system of the datapoints returned. Cannot be used with target_unit. - limit (int | None): Maximum number of datapoints to return for each time series. Default: None (no limit) - include_outside_points (bool): Whether to include outside points. Not allowed when fetching aggregates. Default: False - ignore_unknown_ids (bool): Whether to ignore missing time series rather than raising an exception. Default: False - ignore_bad_datapoints (bool): Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. - treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. - uniform_index (bool): If only querying aggregates AND a single granularity is used (that's NOT a calendar granularity like month/quarter/year) AND no limit is used AND no timezone is used, specifying `uniform_index=True` will return a dataframe with an equidistant datetime index from the earliest `start` to the latest `end` (missing values will be NaNs). 
If these requirements are not met, a ValueError is raised. Default: False - include_status (bool): Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. Also adds the status info as a separate level in the columns (MultiIndex). - include_unit (bool): Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) - include_aggregate_name (bool): Include aggregate in the dataframe columns, if present (separate MultiIndex level) - include_granularity_name (bool): Include granularity in the dataframe columns, if present (separate MultiIndex level) + id: Id, DatapointsQuery or (mixed) sequence of these. See examples. + external_id: External id, DatapointsQuery or (mixed) sequence of these. See examples. + instance_id: Instance id, DatapointsQuery or (mixed) sequence of these. See examples. + start: Inclusive start. Default: 1970-01-01 UTC. + end: Exclusive end. Default: "now" + aggregates: Single aggregate or list of aggregates to retrieve. Available options: ``average``, ``continuous_variance``, ``count``, ``count_bad``, ``count_good``, ``count_uncertain``, ``discrete_variance``, ``duration_bad``, ``duration_good``, ``duration_uncertain``, ``interpolation``, ``max``, ``max_datapoint``, ``min``, ``min_datapoint``, ``step_interpolation``, ``sum`` and ``total_variation``. Default: None (raw datapoints returned) + granularity: The granularity to fetch aggregates at. Can be given as an abbreviation or spelled out for clarity: ``s/second(s)``, ``m/minute(s)``, ``h/hour(s)``, ``d/day(s)``, ``w/week(s)``, ``mo/month(s)``, ``q/quarter(s)``, or ``y/year(s)``. Examples: ``30s``, ``5m``, ``1day``, ``2weeks``. Default: None. + timezone: For raw datapoints, which timezone to use when displaying (will not affect what is retrieved). For aggregates, which timezone to align to for granularity 'hour' and longer. 
Align to the start of the hour, -day or -month. For timezones of type Region/Location, like 'Europe/Oslo', pass a string or ``ZoneInfo`` instance. The aggregate duration will then vary, typically due to daylight saving time. You can also use a fixed offset from UTC by passing a string like '+04:00', 'UTC-7' or 'UTC-02:30' or an instance of ``datetime.timezone``. Note: Historical timezones with second offset are not supported, and timezones with minute offsets (e.g. UTC+05:30 or Asia/Kolkata) may take longer to execute. + target_unit: The unit_external_id of the datapoints returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system: The unit system of the datapoints returned. Cannot be used with target_unit. + limit: Maximum number of datapoints to return for each time series. Default: None (no limit) + include_outside_points: Whether to include outside points. Not allowed when fetching aggregates. Default: False + ignore_unknown_ids: Whether to ignore missing time series rather than raising an exception. Default: False + ignore_bad_datapoints: Treat datapoints with a bad status code as if they do not exist. If set to false, raw queries will include bad datapoints in the response, and aggregates will in general omit the time period between a bad datapoint and the next good datapoint. Also, the period between a bad datapoint and the previous good datapoint will be considered constant. Default: True. + treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Used for both raw queries and aggregates. Default: True. 
+ uniform_index: If only querying aggregates AND a single granularity is used (that's NOT a calendar granularity like month/quarter/year) AND no limit is used AND no timezone is used, specifying `uniform_index=True` will return a dataframe with an equidistant datetime index from the earliest `start` to the latest `end` (missing values will be NaNs). If these requirements are not met, a ValueError is raised. Default: False + include_status: Also return the status code, an integer, for each datapoint in the response. Only relevant for raw datapoint queries, and the object aggregates ``min_datapoint`` and ``max_datapoint``. Also adds the status info as a separate level in the columns (MultiIndex). + include_unit: Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) + include_aggregate_name: Include aggregate in the dataframe columns, if present (separate MultiIndex level) + include_granularity_name: Include granularity in the dataframe columns, if present (separate MultiIndex level) Returns: - pd.DataFrame: A pandas DataFrame containing the requested time series. The ordering of columns is ids first, then external_ids, and lastly instance_ids. For time series with multiple aggregates, they will be sorted in alphabetical order ("average" before "max"). + A pandas DataFrame containing the requested time series. The ordering of columns is ids first, then external_ids, and lastly instance_ids. For time series with multiple aggregates, they will be sorted in alphabetical order ("average" before "max"). Tip: Pandas DataFrames have one shared index, so when you fetch datapoints from multiple time series, the final index will be @@ -1200,19 +1200,19 @@ def retrieve_latest( `status codes. `_ Args: - id (int | LatestDatapointQuery | Sequence[int | LatestDatapointQuery] | None): Id or list of ids. - external_id (str | LatestDatapointQuery | SequenceNotStr[str | LatestDatapointQuery] | None): External id or list of external ids. 
- instance_id (NodeId | LatestDatapointQuery | Sequence[NodeId | LatestDatapointQuery] | None): Instance id or list of instance ids. - before (None | int | str | datetime.datetime): Get latest datapoint before this time. Not used when passing 'LatestDatapointQuery'. - target_unit (str | None): The unit_external_id of the datapoint returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. - target_unit_system (str | None): The unit system of the datapoint returned. Cannot be used with target_unit. - include_status (bool): Also return the status code, an integer, for each datapoint in the response. - ignore_bad_datapoints (bool): Prevent datapoints with a bad status code to be returned. Default: True. - treat_uncertain_as_bad (bool): Treat uncertain status codes as bad. If false, treat uncertain as good. Default: True. - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids. + external_id: External id or list of external ids. + instance_id: Instance id or list of instance ids. + before: Get latest datapoint before this time. Not used when passing 'LatestDatapointQuery'. + target_unit: The unit_external_id of the datapoint returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system: The unit system of the datapoint returned. Cannot be used with target_unit. + include_status: Also return the status code, an integer, for each datapoint in the response. + ignore_bad_datapoints: Prevent datapoints with a bad status code to be returned. Default: True. + treat_uncertain_as_bad: Treat uncertain status codes as bad. If false, treat uncertain as good. Default: True. 
+ ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - Datapoints | DatapointsList | None: A Datapoints object containing the requested data, or a DatapointsList if multiple were requested. If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. + A Datapoints object containing the requested data, or a DatapointsList if multiple were requested. If `ignore_unknown_ids` is `True`, a single time series is requested and it is not found, the function will return `None`. Examples: @@ -1314,10 +1314,10 @@ def insert( `status codes. `_ Args: - datapoints (Datapoints | DatapointsArray | Sequence[dict[str, int | float | str | datetime.datetime]] | Sequence[tuple[int | float | datetime.datetime, int | float | str] | tuple[int | float | datetime.datetime, int | float | str, int]]): The datapoints you wish to insert. Can either be a list of tuples, a list of dictionaries, a Datapoints object or a DatapointsArray object. See examples below. - id (int | None): Id of time series to insert datapoints into. - external_id (str | None): External id of time series to insert datapoint into. - instance_id (NodeId | None): Instance ID of time series to insert datapoints into. + datapoints: The datapoints you wish to insert. Can either be a list of tuples, a list of dictionaries, a Datapoints object or a DatapointsArray object. See examples below. + id: Id of time series to insert datapoints into. + external_id: External id of time series to insert datapoint into. + instance_id: Instance ID of time series to insert datapoints into. Note: All datapoints inserted without a status code (or symbol) is assumed to be good (code 0). To mark a value, pass @@ -1403,7 +1403,7 @@ def insert_multiple( `status codes. 
`_ Args: - datapoints (list[dict[str, str | int | list | Datapoints | DatapointsArray | NodeId]]): The datapoints you wish to insert along with the ids of the time series. See examples below. + datapoints: The datapoints you wish to insert along with the ids of the time series. See examples below. Note: All datapoints inserted without a status code (or symbol) is assumed to be good (code 0). To mark a value, pass @@ -1475,11 +1475,11 @@ def delete_range( Delete a range of datapoints from a time series. Args: - start (int | str | datetime.datetime): Inclusive start of delete range - end (int | str | datetime.datetime): Exclusive end of delete range - id (int | None): Id of time series to delete data from - external_id (str | None): External id of time series to delete data from - instance_id (NodeId | None): Instance ID of time series to delete data from + start: Inclusive start of delete range + end: Exclusive end of delete range + id: Id of time series to delete data from + external_id: External id of time series to delete data from + instance_id: Instance ID of time series to delete data from Examples: @@ -1505,7 +1505,7 @@ def delete_ranges(self, ranges: list[dict[str, Any]]) -> None: `Delete a range of datapoints from multiple time series. `_ Args: - ranges (list[dict[str, Any]]): The list of datapoint ids along with time range to delete. See examples below. + ranges: The list of datapoint ids along with time range to delete. See examples below. Examples: @@ -1532,8 +1532,8 @@ def insert_dataframe(self, df: pd.DataFrame, dropna: bool = True) -> None: The column identifiers must be unique. Args: - df (pd.DataFrame): Pandas DataFrame object containing the time series. - dropna (bool): Set to True to ignore NaNs in the given DataFrame, applied per column. Default: True. + df: Pandas DataFrame object containing the time series. + dropna: Set to True to ignore NaNs in the given DataFrame, applied per column. Default: True. 
Warning: You can not insert datapoints with status codes using this method (``insert_dataframe``), you'll need diff --git a/cognite/client/_sync_api/datapoints_subscriptions.py b/cognite/client/_sync_api/datapoints_subscriptions.py index 76900fa516..4d2f716741 100644 --- a/cognite/client/_sync_api/datapoints_subscriptions.py +++ b/cognite/client/_sync_api/datapoints_subscriptions.py @@ -1,6 +1,6 @@ """ =============================================================================== -85e289f5eaf4fab3b555c4c777a0fb48 +4c3b1ef16e410f6894347a773b11ccaa This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -47,11 +47,11 @@ def __call__( Iterate over all datapoint subscriptions. Args: - chunk_size (int | None): The number of datapoint subscriptions to fetch per request. Defaults to yielding one datapoint subscription at a time. - limit (int | None): Maximum number of items to return. Defaults to return all datapoint subscriptions. + chunk_size: The number of datapoint subscriptions to fetch per request. Defaults to yielding one datapoint subscription at a time. + limit: Maximum number of items to return. Defaults to return all datapoint subscriptions. Yields: - DatapointSubscription | DatapointSubscriptionList: Yields datapoint subscriptions one by one if chunk is not specified, otherwise returns a list of datapoint subscriptions. + Yields datapoint subscriptions one by one if chunk is not specified, otherwise returns a list of datapoint subscriptions. """ # noqa: DOC404 yield from SyncIterator(self.__async_client.time_series.subscriptions(chunk_size=chunk_size, limit=limit)) # type: ignore [misc] @@ -62,10 +62,10 @@ def create(self, subscription: DataPointSubscriptionWrite) -> DatapointSubscript Create a subscription that can be used to listen for changes in data points for a set of time series. 
Args: - subscription (DataPointSubscriptionWrite): Subscription to create. + subscription: Subscription to create. Returns: - DatapointSubscription: Created subscription + Created subscription Examples: @@ -116,8 +116,8 @@ def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: boo `Delete subscription(s). This operation cannot be undone. `_ Args: - external_id (str | SequenceNotStr[str]): External ID or list of external IDs of subscriptions to delete. - ignore_unknown_ids (bool): Whether to ignore IDs and external IDs that are not found rather than throw an exception. + external_id: External ID or list of external IDs of subscriptions to delete. + ignore_unknown_ids: Whether to ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -139,10 +139,10 @@ def retrieve(self, external_id: str) -> DatapointSubscription | None: `Retrieve one subscription by external ID. `_ Args: - external_id (str): External ID of the subscription to retrieve. + external_id: External ID of the subscription to retrieve. Returns: - DatapointSubscription | None: The requested subscription. + The requested subscription. Examples: @@ -162,11 +162,11 @@ def list_member_time_series(self, external_id: str, limit: int | None = DEFAULT_ Retrieve a list of time series (IDs) that the subscription is currently retrieving updates from Args: - external_id (str): External ID of the subscription to retrieve members of. - limit (int | None): Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + external_id: External ID of the subscription to retrieve members of. + limit: Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - TimeSeriesIDList: List of time series in the subscription. + List of time series in the subscription. Examples: @@ -195,11 +195,11 @@ def update( Furthermore, the subscription partition cannot be changed. 
Args: - update (DataPointSubscriptionUpdate | DataPointSubscriptionWrite): The subscription update. - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DataPointSubscriptionWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. + update: The subscription update. + mode: How to update data when a non-update object is given (DataPointSubscriptionWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. Returns: - DatapointSubscription: Updated subscription. + Updated subscription. Examples: @@ -244,18 +244,18 @@ def iterate_data( older than 7 days may be discarded. Args: - external_id (str): The external ID of the subscription. - start (str | None): When to start the iteration. If set to None, the iteration will start from the beginning. The format is "N[timeunit]-ago", where timeunit is w,d,h,m (week, day, hour, minute). For example, "12h-ago" will start the iteration from 12 hours ago. You can also set it to "now" to jump straight to the end. Defaults to None. - limit (int): Approximate number of results to return across all partitions. - partition (int): The partition to iterate over. Defaults to 0. - poll_timeout (int): How many seconds to wait for new data, until an empty response is sent. Defaults to 5. - cursor (str | None): Optional cursor to start iterating from. 
- include_status (bool): Also return the status code, an integer, for each datapoint in the response. - ignore_bad_datapoints (bool): Do not return bad datapoints. Default: True. - treat_uncertain_as_bad (bool): Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Default: True. + external_id: The external ID of the subscription. + start: When to start the iteration. If set to None, the iteration will start from the beginning. The format is "N[timeunit]-ago", where timeunit is w,d,h,m (week, day, hour, minute). For example, "12h-ago" will start the iteration from 12 hours ago. You can also set it to "now" to jump straight to the end. Defaults to None. + limit: Approximate number of results to return across all partitions. + partition: The partition to iterate over. Defaults to 0. + poll_timeout: How many seconds to wait for new data, until an empty response is sent. Defaults to 5. + cursor: Optional cursor to start iterating from. + include_status: Also return the status code, an integer, for each datapoint in the response. + ignore_bad_datapoints: Do not return bad datapoints. Default: True. + treat_uncertain_as_bad: Treat datapoints with uncertain status codes as bad. If false, treat datapoints with uncertain status codes as good. Default: True. Yields: - DatapointSubscriptionBatch: Changes to the subscription and data in the subscribed time series. + Changes to the subscription and data in the subscribed time series. Examples: @@ -301,9 +301,9 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DatapointSubscriptionL `List data point subscriptions `_ Args: - limit (int | None): Maximum number of subscriptions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of subscriptions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. 
Returns: - DatapointSubscriptionList: List of requested datapoint subscriptions + List of requested datapoint subscriptions Examples: diff --git a/cognite/client/_sync_api/diagrams.py b/cognite/client/_sync_api/diagrams.py index eb6f492a01..806c0c5b54 100644 --- a/cognite/client/_sync_api/diagrams.py +++ b/cognite/client/_sync_api/diagrams.py @@ -1,6 +1,6 @@ """ =============================================================================== -65f9b6c08f8e220757db70e1267a74e5 +8f129c376e15e09069c7d1096a621a73 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -91,19 +91,19 @@ def detect( are able to access the data sent to this endpoint. Args: - entities (Sequence[dict | CogniteResource]): List of entities to detect - search_field (str): If entities is a list of dictionaries, this is the key to the values to detect in the PnId - partial_match (bool): Allow for a partial match (e.g. missing prefix). - min_tokens (int): Minimal number of tokens a match must be based on - file_ids (int | Sequence[int] | None): ID of the files, should already be uploaded in the same tenant. - file_external_ids (str | SequenceNotStr[str] | None): File external ids, alternative to file_ids and file_references. - file_instance_ids (NodeId | Sequence[NodeId] | None): Files to detect in, specified by instance id. - file_references (list[FileReference] | FileReference | None): File references (id, external_id or instance_id), and first_page and last_page to specify page ranges per file. Each reference can specify up to 50 pages. Providing a page range will also make the page count of the document a part of the response. - pattern_mode (bool | None): If True, entities must be provided with a sample field. This enables detecting tags that are similar to the sample, but not necessarily identical. Defaults to None. 
- configuration (DiagramDetectConfig | None): Additional configuration for the detect algorithm. See `DiagramDetectConfig` class documentation and `beta API docs `_. - multiple_jobs (bool): Enables you to publish multiple jobs. If True the method returns a tuple of DetectJobBundle and list of potentially unposted files. If False it will return a single DiagramDetectResults. Defaults to False. + entities: List of entities to detect + search_field: If entities is a list of dictionaries, this is the key to the values to detect in the PnId + partial_match: Allow for a partial match (e.g. missing prefix). + min_tokens: Minimal number of tokens a match must be based on + file_ids: ID of the files, should already be uploaded in the same tenant. + file_external_ids: File external ids, alternative to file_ids and file_references. + file_instance_ids: Files to detect in, specified by instance id. + file_references: File references (id, external_id or instance_id), and first_page and last_page to specify page ranges per file. Each reference can specify up to 50 pages. Providing a page range will also make the page count of the document a part of the response. + pattern_mode: If True, entities must be provided with a sample field. This enables detecting tags that are similar to the sample, but not necessarily identical. Defaults to None. + configuration: Additional configuration for the detect algorithm. See `DiagramDetectConfig` class documentation and `beta API docs `_. + multiple_jobs: Enables you to publish multiple jobs. If True the method returns a tuple of DetectJobBundle and list of potentially unposted files. If False it will return a single DiagramDetectResults. Defaults to False. Returns: - DiagramDetectResults | tuple[DetectJobBundle, list[dict[str, Any]]]: Resulting queued job or a bundle of jobs and a list of unposted files. Note that the .result property of the job or job bundle will block waiting for results. 
+ Resulting queued job or a bundle of jobs and a list of unposted files. Note that the .result property of the job or job bundle will block waiting for results. Note: The results are not written to CDF, to create annotations based on detected entities use `AnnotationsAPI`. @@ -200,10 +200,10 @@ def convert(self, detect_job: DiagramDetectResults) -> DiagramConvertResults: Will automatically wait for the detect job to complete before starting the conversion. Args: - detect_job (DiagramDetectResults): detect job + detect_job: detect job Returns: - DiagramConvertResults: Resulting queued job. + Resulting queued job. Examples: diff --git a/cognite/client/_sync_api/document_preview.py b/cognite/client/_sync_api/document_preview.py index e1954618a0..d17ef22275 100644 --- a/cognite/client/_sync_api/document_preview.py +++ b/cognite/client/_sync_api/document_preview.py @@ -1,6 +1,6 @@ """ =============================================================================== -017294c2f92ed12bd676cae54d58c19d +f1a31d9adc227d6031aa7211314be3c4 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -27,11 +27,11 @@ def download_page_as_png_bytes(self, id: int, page_number: int = 1) -> bytes: `Downloads an image preview for a specific page of the specified document. `_ Args: - id (int): The server-generated ID for the document you want to retrieve the preview of. - page_number (int): Page number to preview. Starting at 1 for first page. + id: The server-generated ID for the document you want to retrieve the preview of. + page_number: Page number to preview. Starting at 1 for first page. Returns: - bytes: The png preview of the document. + The png preview of the document. Examples: @@ -59,10 +59,10 @@ def download_page_as_png( `Downloads an image preview for a specific page of the specified document. 
`_ Args: - path (Path | str | IO): The path to save the png preview of the document. If the path is a directory, the file name will be '[id]_page[page_number].png'. - id (int): The server-generated ID for the document you want to retrieve the preview of. - page_number (int): Page number to preview. Starting at 1 for first page. - overwrite (bool): Whether to overwrite existing file at the given path. Defaults to False. + path: The path to save the png preview of the document. If the path is a directory, the file name will be '[id]_page[page_number].png'. + id: The server-generated ID for the document you want to retrieve the preview of. + page_number: Page number to preview. Starting at 1 for first page. + overwrite: Whether to overwrite existing file at the given path. Defaults to False. Examples: @@ -86,10 +86,10 @@ def download_document_as_pdf_bytes(self, id: int) -> bytes: Previews will be rendered if necessary during the request. Be prepared for the request to take a few seconds to complete. Args: - id (int): The server-generated ID for the document you want to retrieve the preview of. + id: The server-generated ID for the document you want to retrieve the preview of. Returns: - bytes: The pdf preview of the document. + The pdf preview of the document. Examples: @@ -109,9 +109,9 @@ def download_document_as_pdf(self, path: Path | str | IO, id: int, overwrite: bo Previews will be rendered if necessary during the request. Be prepared for the request to take a few seconds to complete. Args: - path (Path | str | IO): The path to save the pdf preview of the document. If the path is a directory, the file name will be '[id].pdf'. - id (int): The server-generated ID for the document you want to retrieve the preview of. - overwrite (bool): Whether to overwrite existing file at the given path. Defaults to False. + path: The path to save the pdf preview of the document. If the path is a directory, the file name will be '[id].pdf'. 
+ id: The server-generated ID for the document you want to retrieve the preview of. + overwrite: Whether to overwrite existing file at the given path. Defaults to False. Examples: @@ -131,10 +131,10 @@ def retrieve_pdf_link(self, id: int) -> TemporaryLink: `Retrieve a Temporary link to download pdf preview `_ Args: - id (int): The server-generated ID for the document you want to retrieve the preview of. + id: The server-generated ID for the document you want to retrieve the preview of. Returns: - TemporaryLink: A temporary link to download the pdf preview. + A temporary link to download the pdf preview. Examples: diff --git a/cognite/client/_sync_api/documents.py b/cognite/client/_sync_api/documents.py index 7e36a47d8d..64410fe122 100644 --- a/cognite/client/_sync_api/documents.py +++ b/cognite/client/_sync_api/documents.py @@ -1,6 +1,6 @@ """ =============================================================================== -a2e2ce17c4a937f9593fe19d561085fc +4128482cbfd6921cf20dd2190ad3b7e3 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -70,13 +70,13 @@ def __call__( Fetches documents as they are iterated over, so you keep a limited number of documents in memory. Args: - chunk_size (int | None): Number of documents to return in each chunk. Defaults to yielding one document at a time. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to return. - sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending. - limit (int | None): Maximum number of documents to return. Default to return all items. + chunk_size: Number of documents to return in each chunk. Defaults to yielding one document at a time. + filter: The filter to narrow down the documents to return. + sort: The property to sort by. The default order is ascending. 
+ limit: Maximum number of documents to return. Default to return all items. Yields: - Document | DocumentList: yields Documents one by one if chunk_size is not specified, else DocumentList objects. + yields Documents one by one if chunk_size is not specified, else DocumentList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.documents(chunk_size=chunk_size, filter=filter, sort=sort, limit=limit) @@ -87,11 +87,11 @@ def aggregate_count(self, query: str | None = None, filter: Filter | dict[str, A `Count of documents matching the specified filters and search. `_ Args: - query (str | None): The free text search query, for details see the documentation referenced above. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count. + query: The free text search query, for details see the documentation referenced above. + filter: The filter to narrow down the documents to count. Returns: - int: The number of documents matching the specified filters and search. + The number of documents matching the specified filters and search. Examples: @@ -132,13 +132,13 @@ def aggregate_cardinality_values( `Find approximate property count for documents. `_ Args: - property (DocumentProperty | SourceFileProperty | list[str] | str): The property to count the cardinality of. - query (str | None): The free text search query, for details see the documentation referenced above. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + property: The property to count the cardinality of. + query: The free text search query, for details see the documentation referenced above. + filter: The filter to narrow down the documents to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. 
Returns: - int: The number of documents matching the specified filters and search. + The number of documents matching the specified filters and search. Examples: @@ -182,13 +182,13 @@ def aggregate_cardinality_properties( `Find approximate paths count for documents. `_ Args: - path (SourceFileProperty | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["sourceFile", "metadata"]. It means to aggregate only metadata properties (aka keys). - query (str | None): The free text search query, for details see the documentation referenced above. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + path: The scope in every document to aggregate properties. The only value allowed now is ["sourceFile", "metadata"]. It means to aggregate only metadata properties (aka keys). + query: The free text search query, for details see the documentation referenced above. + filter: The filter to narrow down the documents to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. Returns: - int: The number of documents matching the specified filters and search. + The number of documents matching the specified filters and search. Examples: @@ -217,14 +217,14 @@ def aggregate_unique_values( `Get unique properties with counts for documents. `_ Args: - property (DocumentProperty | SourceFileProperty | list[str] | str): The property to group by. - query (str | None): The free text search query, for details see the documentation referenced above. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - limit (int): Maximum number of items. Defaults to 25. + property: The property to group by. 
+ query: The free text search query, for details see the documentation referenced above. + filter: The filter to narrow down the documents to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + limit: Maximum number of items. Defaults to 25. Returns: - UniqueResultList: List of unique values of documents matching the specified filters and search. + List of unique values of documents matching the specified filters and search. Examples: @@ -272,14 +272,14 @@ def aggregate_unique_properties( `Get unique paths with counts for documents. `_ Args: - path (DocumentProperty | SourceFileProperty | list[str] | str): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). - query (str | None): The free text search query, for details see the documentation referenced above. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - limit (int): Maximum number of items. Defaults to 25. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + query: The free text search query, for details see the documentation referenced above. + filter: The filter to narrow down the documents to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + limit: Maximum number of items. Defaults to 25. Returns: - UniqueResultList: List of unique values of documents matching the specified filters and search. + List of unique values of documents matching the specified filters and search. Examples: @@ -311,12 +311,12 @@ def retrieve_content( you can use this endpoint. Args: - id (int | None): The server-generated ID for the document you want to retrieve the content of. 
- external_id (str | None): External ID of the document. - instance_id (NodeId | None): Instance ID of the document. + id: The server-generated ID for the document you want to retrieve the content of. + external_id: External ID of the document. + instance_id: Instance ID of the document. Returns: - bytes: The content of the document. + The content of the document. Examples: @@ -355,10 +355,10 @@ def retrieve_content_buffer( you can use this endpoint. Args: - buffer (BinaryIO): The document content is streamed directly into the buffer. This is useful for retrieving large documents. - id (int | None): The server-generated ID for the document you want to retrieve the content of. - external_id (str | None): External ID of the document. - instance_id (NodeId | None): Instance ID of the document. + buffer: The document content is streamed directly into the buffer. This is useful for retrieving large documents. + id: The server-generated ID for the document you want to retrieve the content of. + external_id: External ID of the document. + instance_id: Instance ID of the document. Examples: @@ -418,14 +418,14 @@ def search( endpoint documentation referenced above. Args: - query (str): The free text search query. - highlight (bool): Whether or not matches in search results should be highlighted. - filter (Filter | dict[str, Any] | None): The filter to narrow down the documents to search. - sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending. - limit (int): Maximum number of items to return. When using highlights, the maximum value is reduced to 20. Defaults to 25. + query: The free text search query. + highlight: Whether or not matches in search results should be highlighted. + filter: The filter to narrow down the documents to search. + sort: The property to sort by. The default order is ascending. + limit: Maximum number of items to return. 
When using highlights, the maximum value is reduced to 20. Defaults to 25. Returns: - DocumentList | DocumentHighlightList: List of search results. If highlight is True, a DocumentHighlightList is returned, otherwise a DocumentList is returned. + List of search results. If highlight is True, a DocumentHighlightList is returned, otherwise a DocumentList is returned. Examples: @@ -473,12 +473,12 @@ def list( project. Args: - filter (Filter | dict[str, Any] | None): Filter | dict[str, Any] | None): The filter to narrow down the documents to return. - sort (DocumentSort | SortableProperty | tuple[SortableProperty, Literal['asc', 'desc']] | None): The property to sort by. The default order is ascending. - limit (int | None): Maximum number of documents to return. Defaults to 25. Set to None or -1 to return all documents. + filter: The filter to narrow down the documents to return. + sort: The property to sort by. The default order is ascending. + limit: Maximum number of documents to return. Defaults to 25. Set to None or -1 to return all documents. Returns: - DocumentList: List of documents + List of documents Examples: diff --git a/cognite/client/_sync_api/entity_matching.py b/cognite/client/_sync_api/entity_matching.py index e1fb873071..d8d6021b72 100644 --- a/cognite/client/_sync_api/entity_matching.py +++ b/cognite/client/_sync_api/entity_matching.py @@ -1,6 +1,6 @@ """ =============================================================================== -1eaa0d561ca8b8cd03d87b44b8c0c8bc +3f883e3cf967206d715bdaef521dbb22 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -36,11 +36,11 @@ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Ent `Retrieve model `_ Args: - id (int | None): id of the model to retrieve. - external_id (str | None): external id of the model to retrieve. + id: id of the model to retrieve. 
+ external_id: external id of the model to retrieve. Returns: - EntityMatchingModel | None: Model requested. + Model requested. Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient @@ -57,11 +57,11 @@ def retrieve_multiple( `Retrieve models `_ Args: - ids (Sequence[int] | None): ids of the model to retrieve. - external_ids (SequenceNotStr[str] | None): external ids of the model to retrieve. + ids: ids of the model to retrieve. + external_ids: external ids of the model to retrieve. Returns: - EntityMatchingModelList: Models requested. + Models requested. Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient @@ -82,11 +82,11 @@ def update( `Update model `_ Args: - item (EntityMatchingModel | EntityMatchingModelUpdate | Sequence[EntityMatchingModel | EntityMatchingModelUpdate]): Model(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (EntityMatchingModel). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Model(s) to update + mode: How to update data when a non-update object is given (EntityMatchingModel). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - EntityMatchingModelList | EntityMatchingModel: No description. + The updated model(s).
Examples: >>> from cognite.client.data_classes.contextualization import EntityMatchingModelUpdate @@ -110,15 +110,15 @@ def list( `List models `_ Args: - name (str | None): Optional user-defined name of model. - description (str | None): Optional user-defined description of model. - original_id (int | None): id of the original model for models that were created with refit. - feature_type (str | None): feature type that defines the combination of features used. - classifier (str | None): classifier used in training. - limit (int | None): Maximum number of items to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + name: Optional user-defined name of model. + description: Optional user-defined description of model. + original_id: id of the original model for models that were created with refit. + feature_type: feature type that defines the combination of features used. + classifier: classifier used in training. + limit: Maximum number of items to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - EntityMatchingModelList: List of models. + List of models. Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient @@ -142,7 +142,7 @@ def list_jobs(self) -> ContextualizationJobList: List jobs, typically model fit and predict runs. Returns: - ContextualizationJobList: List of jobs. + List of jobs. """ return run_sync(self.__async_client.entity_matching.list_jobs()) @@ -156,8 +156,8 @@ def delete( Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + id: Id or list of ids + external_id: External ID or list of external ids Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient >>> client = CogniteClient() @@ -187,18 +187,18 @@ def fit( capabilities in the project, are able to access the data sent to this endpoint. 
Args: - sources (Sequence[dict | CogniteResource]): entities to match from, should have an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). Metadata fields are automatically flattened to "metadata.key" entries, such that they can be used in match_fields. - targets (Sequence[dict | CogniteResource]): entities to match to, should have an 'id' field. Tolerant to passing more than is needed or used. - true_matches (Sequence[dict | tuple[int | str, int | str]] | None): Known valid matches given as a list of dicts with keys 'sourceId', 'sourceExternalId', 'targetId', 'targetExternalId'). If omitted, uses an unsupervised model. A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. - match_fields (dict | Sequence[tuple[str, str]] | None): List of (from,to) keys to use in matching. Default in the API is [('name','name')]. Also accepts {"source": .., "target": ..}. - feature_type (str | None): feature type that defines the combination of features used, see API docs for details. - classifier (str | None): classifier used in training. - ignore_missing_fields (bool): whether missing data in match_fields should return error or be filled in with an empty string. - name (str | None): Optional user-defined name of model. - description (str | None): Optional user-defined description of model. - external_id (str | None): Optional external id. Must be unique within the project. + sources: entities to match from, should have an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). Metadata fields are automatically flattened to "metadata.key" entries, such that they can be used in match_fields. + targets: entities to match to, should have an 'id' field. Tolerant to passing more than is needed or used. + true_matches: Known valid matches given as a list of dicts with keys 'sourceId', 'sourceExternalId', 'targetId', 'targetExternalId'). 
If omitted, uses an unsupervised model. A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. + match_fields: List of (from,to) keys to use in matching. Default in the API is [('name','name')]. Also accepts {"source": .., "target": ..}. + feature_type: feature type that defines the combination of features used, see API docs for details. + classifier: classifier used in training. + ignore_missing_fields: whether missing data in match_fields should return error or be filled in with an empty string. + name: Optional user-defined name of model. + description: Optional user-defined description of model. + external_id: Optional external id. Must be unique within the project. Returns: - EntityMatchingModel: Resulting queued model. + Resulting queued model. Example: >>> from cognite.client import CogniteClient, AsyncCogniteClient @@ -249,15 +249,15 @@ def predict( capabilities in the project, are able to access the data sent to this endpoint. Args: - sources (Sequence[dict] | None): entities to match from, does not need an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). If omitted, will use data from fit. - targets (Sequence[dict] | None): entities to match to, does not need an 'id' field. Tolerant to passing more than is needed or used. If omitted, will use data from fit. - num_matches (int): number of matches to return for each item. - score_threshold (float | None): only return matches with a score above this threshold - id (int | None): id of the model to use. - external_id (str | None): external id of the model to use. + sources: entities to match from, does not need an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). If omitted, will use data from fit. + targets: entities to match to, does not need an 'id' field. Tolerant to passing more than is needed or used. If omitted, will use data from fit. 
+ num_matches: number of matches to return for each item. + score_threshold: only return matches with a score above this threshold + id: id of the model to use. + external_id: external id of the model to use. Returns: - EntityMatchingPredictionResult: object which can be used to wait for and retrieve results. + object which can be used to wait for and retrieve results. Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient @@ -299,11 +299,11 @@ def refit( capabilities in the project, are able to access the data sent to this endpoint. Args: - true_matches (Sequence[dict | tuple[int | str, int | str]]): Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'). A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. - id (int | None): id of the model to use. - external_id (str | None): external id of the model to use. + true_matches: Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'). A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. + id: id of the model to use. + external_id: external id of the model to use. Returns: - EntityMatchingModel: new model refitted to true_matches. + new model refitted to true_matches. Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient diff --git a/cognite/client/_sync_api/events.py b/cognite/client/_sync_api/events.py index c0042333c9..25a9bdb50a 100644 --- a/cognite/client/_sync_api/events.py +++ b/cognite/client/_sync_api/events.py @@ -1,6 +1,6 @@ """ =============================================================================== -89cb2979c8c928e5dbdc38ec14974271 +77d3ee5e2efc989a1d91bebec687e704 This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -114,29 +114,29 @@ def __call__( Fetches events as they are iterated over, so you keep a limited number of events in memory. Args: - chunk_size (int | None): Number of events to return in each chunk. Defaults to yielding one event a time. - start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - end_time (dict[str, Any] | EndTimeFilter | None): Range between two timestamps - active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. - type (str | None): Type of the event, e.g 'failure'. - subtype (str | None): Subtype of the event, e.g 'electrical'. - metadata (dict[str, str] | None): Customizable extra data about the event. String key -> String value. - asset_ids (Sequence[int] | None): Asset IDs of related equipments that this event relates to. - asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this event relates to. - asset_subtree_ids (int | Sequence[int] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only events in the specified data set(s) with this id / these ids. 
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only events in the specified data set(s) with this external id / these external ids. - source (str | None): The source of this event. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - external_id_prefix (str | None): External Id provided by client. Should be unique within the project - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. - limit (int | None): Maximum number of events to return. Defaults to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + chunk_size: Number of events to return in each chunk. Defaults to yielding one event at a time. + start_time: Range between two timestamps + end_time: Range between two timestamps + active_at_time: Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. + type: Type of the event, e.g. 'failure'. + subtype: Subtype of the event, e.g. 'electrical'. + metadata: Customizable extra data about the event. String key -> String value. + asset_ids: Asset IDs of related equipment that this event relates to.
+ asset_external_ids: Asset External IDs of related equipment that this event relates to. + asset_subtree_ids: Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only events in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only events in the specified data set(s) with this external id / these external ids. + source: The source of this event. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + external_id_prefix: External Id provided by client. Should be unique within the project + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + limit: Maximum number of events to return. Defaults to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. Yields: - Event | EventList: yields Event one by one if chunk_size is not specified, else EventList objects. + yields Event one by one if chunk_size is not specified, else EventList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.events( @@ -168,11 +168,11 @@ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Eve `Retrieve a single event by id. 
`_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - Event | None: Requested event or None if it does not exist. + Requested event or None if it does not exist. Examples: @@ -199,12 +199,12 @@ def retrieve_multiple( `Retrieve multiple events by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - EventList: The requested events. + The requested events. Examples: @@ -236,13 +236,13 @@ def aggregate_unique_values( `Get unique properties with counts for events. `_ Args: - filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. - property (EventPropertyLike | None): The property name(s) to apply the aggregation on. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to consider. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. + filter: The filter to narrow down the events to count requiring exact match. + property: The property name(s) to apply the aggregation on. + advanced_filter: The filter to narrow down the events to consider. + aggregate_filter: The filter to apply to the resulting buckets. Returns: - UniqueResultList: List of unique values of events matching the specified filters and search. + List of unique values of events matching the specified filters and search. Examples: @@ -292,13 +292,12 @@ def aggregate_count( `Count of event matching the specified filters. `_ Args: - property (EventPropertyLike | None): If specified, Get an approximate number of Events with a specific property - (property is not null) and matching the filters. 
- advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count. - filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + property: If specified, Get an approximate number of Events with a specific property (property is not null) and matching the filters. + advanced_filter: The filter to narrow down the events to count. + filter: The filter to narrow down the events to count requiring exact match. Returns: - int: The number of events matching the specified filters and search. + The number of events matching the specified filters and search. Examples: @@ -333,12 +332,12 @@ def aggregate_cardinality_values( `Find approximate property count for events. `_ Args: - property (EventPropertyLike): The property to count the cardinality of. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + property: The property to count the cardinality of. + advanced_filter: The filter to narrow down the events to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the events to count requiring exact match. Returns: - int: The number of properties matching the specified filter. + The number of properties matching the specified filter. Examples: @@ -374,13 +373,12 @@ def aggregate_cardinality_properties( `Find approximate paths count for events. `_ Args: - path (EventPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. - It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality. 
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the events to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the events to count requiring exact match. Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -409,14 +407,13 @@ def aggregate_unique_properties( `Get unique paths with counts for events. `_ Args: - path (EventPropertyLike): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. - It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the events to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (EventFilter | dict[str, Any] | None): The filter to narrow down the events to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the events to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the events to count requiring exact match. Returns: - UniqueResultList: List of unique values of events matching the specified filters and search. + List of unique values of events matching the specified filters and search. 
Examples: @@ -446,10 +443,10 @@ def create(self, event: Event | EventWrite | Sequence[Event] | Sequence[EventWri `Create one or more events. `_ Args: - event (Event | EventWrite | Sequence[Event] | Sequence[EventWrite]): Event or list of events to create. + event: Event or list of events to create. Returns: - Event | EventList: Created event(s) + Created event(s) Examples: @@ -474,9 +471,9 @@ def delete( `Delete one or more events `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: External ID or list of external ids + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -514,11 +511,11 @@ def update( `Update one or more events `_ Args: - item (Event | EventWrite | EventUpdate | Sequence[Event | EventWrite | EventUpdate]): Event(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Event or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Event(s) to update + mode: How to update data when a non-update object is given (Event or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. 
Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Event | EventList: Updated event(s) + Updated event(s) Examples: @@ -550,12 +547,12 @@ def search( Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. Args: - description (str | None): Fuzzy match on description. - filter (EventFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. - limit (int): Maximum number of results to return. + description: Fuzzy match on description. + filter: Filter to apply. Performs exact match on these fields. + limit: Maximum number of results to return. Returns: - EventList: List of requested events + List of requested events Examples: @@ -585,11 +582,11 @@ def upsert( For more details, see :ref:`appendix-upsert`. Args: - item (Event | EventWrite | Sequence[Event | EventWrite]): Event or list of events to upsert. - mode (Literal['patch', 'replace']): Whether to patch or replace in the case the events are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + item: Event or list of events to upsert. + mode: Whether to patch or replace in the case the events are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. Returns: - Event | EventList: The upserted event(s). + The upserted event(s). Examples: @@ -633,29 +630,29 @@ def list( `List events `_ Args: - start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - end_time (dict[str, Any] | EndTimeFilter | None): Range between two timestamps. 
- active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. - type (str | None): Type of the event, e.g 'failure'. - subtype (str | None): Subtype of the event, e.g 'electrical'. - metadata (dict[str, str] | None): Customizable extra data about the event. String key -> String value. - asset_ids (Sequence[int] | None): Asset IDs of related equipments that this event relates to. - asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this event relates to. - asset_subtree_ids (int | Sequence[int] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only events in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only events in the specified data set(s) with this external id / these external ids. - source (str | None): The source of this event. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. 
- external_id_prefix (str | None): External Id provided by client. Should be unique within the project. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. - partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). - limit (int | None): Maximum number of events to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + start_time: Range between two timestamps. + end_time: Range between two timestamps. + active_at_time: Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. + type: Type of the event, e.g 'failure'. + subtype: Subtype of the event, e.g 'electrical'. + metadata: Customizable extra data about the event. String key -> String value. + asset_ids: Asset IDs of related equipments that this event relates to. + asset_external_ids: Asset External IDs of related equipment that this event relates to. + asset_subtree_ids: Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include events that have a related asset in a subtree rooted at any of these assetExternalIds. 
If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only events in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only events in the specified data set(s) with this external id / these external ids. + source: The source of this event. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + external_id_prefix: External Id provided by client. Should be unique within the project. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + limit: Maximum number of events to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. Returns: - EventList: List of requested events + List of requested events .. 
note:: When using `partitions`, there are few considerations to keep in mind: diff --git a/cognite/client/_sync_api/extractionpipelines/__init__.py b/cognite/client/_sync_api/extractionpipelines/__init__.py index 972dc558f9..8edf093419 100644 --- a/cognite/client/_sync_api/extractionpipelines/__init__.py +++ b/cognite/client/_sync_api/extractionpipelines/__init__.py @@ -1,6 +1,6 @@ """ =============================================================================== -b744d4b33b393daa44929d66f3de741f +a771aa9244ebb82cb3f6f9514e495218 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -45,11 +45,11 @@ def __call__( Iterate over extraction pipelines Args: - chunk_size (int | None): Number of extraction pipelines to yield per chunk. Defaults to yielding extraction pipelines one by one. - limit (int | None): Limits the number of results to be returned. Defaults to yielding all extraction pipelines. + chunk_size: Number of extraction pipelines to yield per chunk. Defaults to yielding extraction pipelines one by one. + limit: Limits the number of results to be returned. Defaults to yielding all extraction pipelines. Yields: - ExtractionPipeline | ExtractionPipelineList: Yields extraction pipelines one by one or in chunks up to the chunk size. + Yields extraction pipelines one by one or in chunks up to the chunk size. """ # noqa: DOC404 yield from SyncIterator(self.__async_client.extraction_pipelines(chunk_size=chunk_size, limit=limit)) # type: ignore [misc] @@ -58,11 +58,11 @@ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Ext `Retrieve a single extraction pipeline by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - ExtractionPipeline | None: Requested extraction pipeline or None if it does not exist. 
+ Requested extraction pipeline or None if it does not exist. Examples: @@ -89,12 +89,12 @@ def retrieve_multiple( `Retrieve multiple extraction pipelines by ids and external ids. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - ExtractionPipelineList: The requested ExtractionPipelines. + The requested ExtractionPipelines. Examples: @@ -120,10 +120,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> ExtractionPipelineList `List extraction pipelines `_ Args: - limit (int | None): Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ExtractionPipelineList: List of requested ExtractionPipelines + List of requested ExtractionPipelines Examples: @@ -157,10 +157,10 @@ def create( You can create an arbitrary number of extraction pipelines, and the SDK will split the request into multiple requests if necessary. Args: - extraction_pipeline (ExtractionPipeline | ExtractionPipelineWrite | Sequence[ExtractionPipeline] | Sequence[ExtractionPipelineWrite]): Extraction pipeline or list of extraction pipelines to create. + extraction_pipeline: Extraction pipeline or list of extraction pipelines to create. 
Returns: - ExtractionPipeline | ExtractionPipelineList: Created extraction pipeline(s) + Created extraction pipeline(s) Examples: @@ -182,8 +182,8 @@ def delete( `Delete one or more extraction pipelines `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + id: Id or list of ids + external_id: External ID or list of external ids Examples: @@ -218,11 +218,11 @@ def update( `Update one or more extraction pipelines `_ Args: - item (ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate | Sequence[ExtractionPipeline | ExtractionPipelineWrite | ExtractionPipelineUpdate]): Extraction pipeline(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ExtractionPipeline or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Extraction pipeline(s) to update + mode: How to update data when a non-update object is given (ExtractionPipeline or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. 
Returns: - ExtractionPipeline | ExtractionPipelineList: Updated extraction pipeline(s) + Updated extraction pipeline(s) Examples: diff --git a/cognite/client/_sync_api/extractionpipelines/configs.py b/cognite/client/_sync_api/extractionpipelines/configs.py index b9f5ac51e6..e764861061 100644 --- a/cognite/client/_sync_api/extractionpipelines/configs.py +++ b/cognite/client/_sync_api/extractionpipelines/configs.py @@ -1,6 +1,6 @@ """ =============================================================================== -cf9f53bea1f0088b6d96e0d4648e01af +692889cfa806d6054133ce49c7cfe807 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -29,12 +29,12 @@ def retrieve( By default the latest configuration revision is retrieved, or you can specify a timestamp or a revision number. Args: - external_id (str): External id of the extraction pipeline to retrieve config from. - revision (int | None): Optionally specify a revision number to retrieve. - active_at_time (int | None): Optionally specify a timestamp the configuration revision should be active. + external_id: External id of the extraction pipeline to retrieve config from. + revision: Optionally specify a revision number to retrieve. + active_at_time: Optionally specify a timestamp the configuration revision should be active. Returns: - ExtractionPipelineConfig: Retrieved extraction pipeline configuration revision + Retrieved extraction pipeline configuration revision Examples: @@ -56,10 +56,10 @@ def list(self, external_id: str) -> ExtractionPipelineConfigRevisionList: `Retrieve all configuration revisions from an extraction pipeline ` Args: - external_id (str): External id of the extraction pipeline to retrieve config from. + external_id: External id of the extraction pipeline to retrieve config from. 
Returns: - ExtractionPipelineConfigRevisionList: Retrieved extraction pipeline configuration revisions + Retrieved extraction pipeline configuration revisions Examples: @@ -77,10 +77,10 @@ def create(self, config: ExtractionPipelineConfig | ExtractionPipelineConfigWrit `Create a new configuration revision ` Args: - config (ExtractionPipelineConfig | ExtractionPipelineConfigWrite): Configuration revision to create. + config: Configuration revision to create. Returns: - ExtractionPipelineConfig: Created extraction pipeline configuration revision + Created extraction pipeline configuration revision Examples: @@ -98,11 +98,11 @@ def revert(self, external_id: str, revision: int) -> ExtractionPipelineConfig: `Revert to a previous configuration revision ` Args: - external_id (str): External id of the extraction pipeline to revert revision for. - revision (int): Revision to revert to. + external_id: External id of the extraction pipeline to revert revision for. + revision: Revision to revert to. Returns: - ExtractionPipelineConfig: New latest extraction pipeline configuration revision. + New latest extraction pipeline configuration revision. Examples: diff --git a/cognite/client/_sync_api/extractionpipelines/runs.py b/cognite/client/_sync_api/extractionpipelines/runs.py index fecbed6d06..fc5b2c75fd 100644 --- a/cognite/client/_sync_api/extractionpipelines/runs.py +++ b/cognite/client/_sync_api/extractionpipelines/runs.py @@ -1,6 +1,6 @@ """ =============================================================================== -c7e0250a7afdf41370a375942043efcf +b0413359a7aacb20d810982f2fd2dd38 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -44,15 +44,14 @@ def list( `List runs for an extraction pipeline with given external_id `_ Args: - external_id (str): Extraction pipeline external Id. 
- statuses (RunStatus | Sequence[RunStatus] | SequenceNotStr[str] | None): One or more among "success" / "failure" / "seen". - message_substring (str | None): Failure message part. - created_time (dict[str, Any] | TimestampRange | str | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as timestamps in ms. - If a string is passed, it is assumed to be the minimum value. - limit (int | None): Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + external_id: Extraction pipeline external Id. + statuses: One or more among "success" / "failure" / "seen". + message_substring: Failure message part. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as timestamps in ms. If a string is passed, it is assumed to be the minimum value. + limit: Maximum number of ExtractionPipelines to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ExtractionPipelineRunList: List of requested extraction pipeline runs + List of requested extraction pipeline runs Tip: The ``created_time`` parameter can also be passed as a string, to support the most typical usage pattern @@ -108,10 +107,10 @@ def create( You can create an arbitrary number of extraction pipeline runs, and the SDK will split the request into multiple requests. Args: - run (ExtractionPipelineRun | ExtractionPipelineRunWrite | Sequence[ExtractionPipelineRun] | Sequence[ExtractionPipelineRunWrite]): ExtractionPipelineRun| ExtractionPipelineRunWrite | Sequence[ExtractionPipelineRun] | Sequence[ExtractionPipelineRunWrite]): Extraction pipeline or list of extraction pipeline runs to create. + run: Extraction pipeline or list of extraction pipeline runs to create. 
Returns: - ExtractionPipelineRun | ExtractionPipelineRunList: Created extraction pipeline run(s) + Created extraction pipeline run(s) Examples: diff --git a/cognite/client/_sync_api/files.py b/cognite/client/_sync_api/files.py index 2ed4650198..df195144f9 100644 --- a/cognite/client/_sync_api/files.py +++ b/cognite/client/_sync_api/files.py @@ -1,6 +1,6 @@ """ =============================================================================== -f1a19d82e7ff74dfade4b2cf571130e6 +556eda2d60da1de60841197b953408da This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -123,31 +123,31 @@ def __call__( Fetches file metadata objects as they are iterated over, so you keep a limited number of metadata objects in memory. Args: - chunk_size (int | None): Number of files to return in each chunk. Defaults to yielding one event a time. - name (str | None): Name of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf, .. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value - asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs. - asset_external_ids (SequenceNotStr[str] | None): No description. - asset_subtree_ids (int | Sequence[int] | None): Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only files in the specified data set(s) with this id / these ids. 
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only files in the specified data set(s) with this external id / these external ids. - labels (LabelFilter | None): Return only the files matching the specified label(s). - geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. - source (str | None): The source of this event. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range. - source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range. - uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - external_id_prefix (str | None): External Id provided by client. Should be unique within the project. - directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client. - uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. - limit (int | None): Maximum number of files to return. Defaults to return all items. + chunk_size: Number of files to return in each chunk. Defaults to yielding one event a time. + name: Name of the file. + mime_type: File type. E.g. text/plain, application/pdf, .. + metadata: Custom, application specific metadata. String key -> String value + asset_ids: Only include files that reference these specific asset IDs. + asset_external_ids: No description. 
+ asset_subtree_ids: Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only files in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only files in the specified data set(s) with this external id / these external ids. + labels: Return only the files matching the specified label(s). + geo_location: Only include files matching the specified geographic relation. + source: The source of this event. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + source_created_time: Filter for files where the sourceCreatedTime field has been set and is within the specified range. + source_modified_time: Filter for files where the sourceModifiedTime field has been set and is within the specified range. + uploaded_time: Range between two timestamps + external_id_prefix: External Id provided by client. Should be unique within the project. + directory_prefix: Filter by this (case-sensitive) prefix for the directory provided by the client. + uploaded: Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. + limit: Maximum number of files to return. Defaults to return all items. Yields: - FileMetadata | FileMetadataList: yields FileMetadata one by one if chunk_size is not specified, else FileMetadataList objects. + yields FileMetadata one by one if chunk_size is not specified, else FileMetadataList objects. 
""" # noqa: DOC404 yield from SyncIterator( self.__async_client.files( @@ -183,11 +183,11 @@ def create( Create file without uploading content. Args: - file_metadata (FileMetadata | FileMetadataWrite): File metadata for the file to create. - overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + file_metadata: File metadata for the file to create. + overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. Returns: - tuple[FileMetadata, str]: Tuple containing the file metadata and upload url of the created file. + Tuple containing the file metadata and upload url of the created file. Examples: @@ -209,12 +209,12 @@ def retrieve( `Retrieve a single file metadata by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID - instance_id (NodeId | None): Instance ID + id: ID + external_id: External ID + instance_id: Instance ID Returns: - FileMetadata | None: Requested file metadata or None if it does not exist. + Requested file metadata or None if it does not exist. Examples: @@ -242,13 +242,13 @@ def retrieve_multiple( `Retrieve multiple file metadatas by id. 
`_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - instance_ids (Sequence[NodeId] | None): Instance IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + instance_ids: Instance IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - FileMetadataList: The requested file metadatas. + The requested file metadatas. Examples: @@ -274,10 +274,10 @@ def aggregate_count(self, filter: FileMetadataFilter | dict[str, Any] | None = N `Aggregate files `_ Args: - filter (FileMetadataFilter | dict[str, Any] | None): Filter on file metadata filter with exact match + filter: Filter on file metadata filter with exact match Returns: - int: Count of files matching the filter. + Count of files matching the filter. Examples: @@ -300,9 +300,9 @@ def delete( `Delete files `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): str or list of str - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: str or list of str + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -344,11 +344,11 @@ def update( Currently, a full replacement of labels on a file is not supported (only partial add/remove updates). See the example below on how to perform partial labels update. Args: - item (FileMetadata | FileMetadataWrite | FileMetadataUpdate | Sequence[FileMetadata | FileMetadataWrite | FileMetadataUpdate]): file(s) to update. - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (FilesMetadata or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). 
Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: file(s) to update. + mode: How to update data when a non-update object is given (FilesMetadata or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - FileMetadata | FileMetadataList: The updated files. + The updated files. Examples: @@ -392,12 +392,12 @@ def search( Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. Args: - name (str | None): Prefix and fuzzy search on name. - filter (FileMetadataFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. - limit (int): Max number of results to return. + name: Prefix and fuzzy search on name. + filter: Filter to apply. Performs exact match on these fields. + limit: Max number of results to return. Returns: - FileMetadataList: List of requested files metadata. + List of requested files metadata. Examples: @@ -422,11 +422,11 @@ def upload_content( `Upload a file content `_ Args: - path (Path | str): Path to the file you wish to upload. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - instance_id (NodeId | None): Instance ID of the file. + path: Path to the file you wish to upload. + external_id: The external ID provided by the client. 
Must be unique within the project. + instance_id: Instance ID of the file. Returns: - FileMetadata: No description. + No description. """ return run_sync( self.__async_client.files.upload_content(path=path, external_id=external_id, instance_id=instance_id) @@ -455,25 +455,25 @@ def upload( `Upload a file `_ Args: - path (Path | str): Path to the file you wish to upload. If path is a directory, this method will upload all files in that directory. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - name (str | None): Name of the file. - source (str | None): The source of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf, ... - metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value. - directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path. - asset_ids (Sequence[int] | None): No description. - source_created_time (int | None): The timestamp for when the file was originally created in the source system. - source_modified_time (int | None): The timestamp for when the file was last modified in the source system. - data_set_id (int | None): ID of the data set. - labels (Sequence[Label] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the file. - security_categories (Sequence[int] | None): Security categories to attach to this file. - recursive (bool): If path is a directory, upload all contained files recursively. - overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. 
If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + path: Path to the file you wish to upload. If path is a directory, this method will upload all files in that directory. + external_id: The external ID provided by the client. Must be unique within the project. + name: Name of the file. + source: The source of the file. + mime_type: File type. E.g. text/plain, application/pdf, ... + metadata: Customizable extra data about the file. String key -> String value. + directory: The directory to be associated with this file. Must be an absolute, unix-style path. + asset_ids: No description. + source_created_time: The timestamp for when the file was originally created in the source system. + source_modified_time: The timestamp for when the file was last modified in the source system. + data_set_id: ID of the data set. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the file. + security_categories: Security categories to attach to this file. + recursive: If path is a directory, upload all contained files recursively. + overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. Returns: - FileMetadata | FileMetadataList: The file metadata of the uploaded file(s). 
+ The file metadata of the uploaded file(s). Examples: @@ -541,12 +541,12 @@ def upload_content_bytes( Note that the maximum file size is 5GiB. In order to upload larger files use `multipart_upload_content_session`. Args: - content (str | bytes | BinaryIO): The content to upload. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - instance_id (NodeId | None): Instance ID of the file. + content: The content to upload. + external_id: The external ID provided by the client. Must be unique within the project. + instance_id: Instance ID of the file. Returns: - FileMetadata: No description. + No description. Examples: @@ -596,24 +596,24 @@ def upload_bytes( Note that the maximum file size is 5GiB. In order to upload larger files use `multipart_upload_session`. Args: - content (str | bytes | BinaryIO | AsyncIterator[bytes]): The content to upload. - name (str): Name of the file. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - source (str | None): The source of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf,... - metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value. - directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path. - asset_ids (Sequence[int] | None): No description. - data_set_id (int | None): Id of the data set. - labels (Sequence[Label] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the file. - source_created_time (int | None): The timestamp for when the file was originally created in the source system. - source_modified_time (int | None): The timestamp for when the file was last modified in the source system. - security_categories (Sequence[int] | None): Security categories to attach to this file. 
- overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + content: The content to upload. + name: Name of the file. + external_id: The external ID provided by the client. Must be unique within the project. + source: The source of the file. + mime_type: File type. E.g. text/plain, application/pdf,... + metadata: Customizable extra data about the file. String key -> String value. + directory: The directory to be associated with this file. Must be an absolute, unix-style path. + asset_ids: No description. + data_set_id: Id of the data set. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the file. + source_created_time: The timestamp for when the file was originally created in the source system. + source_modified_time: The timestamp for when the file was last modified in the source system. + security_categories: Security categories to attach to this file. + overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. 
File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. Returns: - FileMetadata: The metadata of the uploaded file. + The metadata of the uploaded file. Examples: @@ -674,24 +674,24 @@ def multipart_upload_session( for each part before exiting. It also supports async usage with `async with`, then calling `await upload_part_async`. Args: - name (str): Name of the file. - parts (int): The number of parts to upload, must be between 1 and 250. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - source (str | None): The source of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf,... - metadata (dict[str, str] | None): Customizable extra data about the file. String key -> String value. - directory (str | None): The directory to be associated with this file. Must be an absolute, unix-style path. - asset_ids (Sequence[int] | None): No description. - data_set_id (int | None): Id of the data set. - labels (Sequence[Label] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the file. - source_created_time (int | None): The timestamp for when the file was originally created in the source system. - source_modified_time (int | None): The timestamp for when the file was last modified in the source system. - security_categories (Sequence[int] | None): Security categories to attach to this file. - overwrite (bool): If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. 
If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. + name: Name of the file. + parts: The number of parts to upload, must be between 1 and 250. + external_id: The external ID provided by the client. Must be unique within the project. + source: The source of the file. + mime_type: File type. E.g. text/plain, application/pdf,... + metadata: Customizable extra data about the file. String key -> String value. + directory: The directory to be associated with this file. Must be an absolute, unix-style path. + asset_ids: No description. + data_set_id: Id of the data set. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the file. + source_created_time: The timestamp for when the file was originally created in the source system. + source_modified_time: The timestamp for when the file was last modified in the source system. + security_categories: Security categories to attach to this file. + overwrite: If 'overwrite' is set to true, and the POST body content specifies a 'externalId' field, fields for the file found for externalId can be overwritten. The default setting is false. If metadata is included in the request body, all of the original metadata will be overwritten. The actual file will be overwritten after successful upload. If there is no successful upload, the current file contents will be kept. File-Asset mappings only change if explicitly stated in the assetIds field of the POST json body. Do not set assetIds in request body if you want to keep the current file-asset mappings. Returns: - FileMultipartUploadSession: Object containing metadata about the created file, and information needed to upload the file content. 
Use this object to manage the file upload, and `exit` it once all parts are uploaded. + Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded. Examples: @@ -740,12 +740,12 @@ def multipart_upload_content_session( for each part before exiting. It also supports async usage with `async with`, then calling `await upload_part_async`. Args: - parts (int): The number of parts to upload, must be between 1 and 250. - external_id (str | None): The external ID provided by the client. Must be unique within the project. - instance_id (NodeId | None): Instance ID of the file. + parts: The number of parts to upload, must be between 1 and 250. + external_id: The external ID provided by the client. Must be unique within the project. + instance_id: Instance ID of the file. Returns: - FileMultipartUploadSession: Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded. + Object containing metadata about the created file, and information needed to upload the file content. Use this object to manage the file upload, and `exit` it once all parts are uploaded. Examples: @@ -776,13 +776,13 @@ def retrieve_download_urls( Get download links by id or external id Args: - id (int | Sequence[int] | None): Id or list of ids. - external_id (str | SequenceNotStr[str] | None): External id or list of external ids. - instance_id (NodeId | Sequence[NodeId] | None): Instance id or list of instance ids. - extended_expiration (bool): Extend expiration time of download url to 1 hour. Defaults to false. + id: Id or list of ids. + external_id: External id or list of external ids. + instance_id: Instance id or list of instance ids. + extended_expiration: Extend expiration time of download url to 1 hour. Defaults to false. 
Returns: - dict[int | str | NodeId, str]: Dictionary containing download urls. + Dictionary containing download urls. """ return run_sync( self.__async_client.files.retrieve_download_urls( @@ -814,13 +814,12 @@ def download( the files missing. A warning is issued when this happens, listing the affected files. Args: - directory (str | Path): Directory to download the file(s) to. - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids. - instance_id (NodeId | Sequence[NodeId] | None): Instance ID or list of instance ids. - keep_directory_structure (bool): Whether or not to keep the directory hierarchy in CDF, - creating subdirectories as needed below the given directory. - resolve_duplicate_file_names (bool): Whether or not to resolve duplicate file names by appending a number on duplicate file names + directory: Directory to download the file(s) to. + id: Id or list of ids + external_id: External ID or list of external ids. + instance_id: Instance ID or list of instance ids. + keep_directory_structure: Whether or not to keep the directory hierarchy in CDF, creating subdirectories as needed below the given directory. + resolve_duplicate_file_names: Whether or not to resolve duplicate file names by appending a number on duplicate file names Examples: @@ -853,10 +852,10 @@ def download_to_path( Download a file to a specific target. Args: - path (Path | str): Download to this path. - id (int | None): Id of of the file to download. - external_id (str | None): External id of the file to download. - instance_id (NodeId | None): Instance id of the file to download. + path: Download to this path. + id: Id of the file to download. + external_id: External id of the file to download. + instance_id: Instance id of the file to download. Examples: @@ -879,9 +878,9 @@ def download_bytes( Download a file as bytes.
Args: - id (int | None): Id of the file - external_id (str | None): External id of the file - instance_id (NodeId | None): Instance id of the file + id: Id of the file + external_id: External id of the file + instance_id: Instance id of the file Examples: @@ -893,7 +892,7 @@ def download_bytes( >>> file_content = client.files.download_bytes(id=1) Returns: - bytes: The file in binary format + The file in binary format """ return run_sync( self.__async_client.files.download_bytes(id=id, external_id=external_id, instance_id=instance_id) @@ -928,31 +927,31 @@ def list( `List files `_ Args: - name (str | None): Name of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf, .. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value - asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs. - asset_external_ids (SequenceNotStr[str] | None): No description. - asset_subtree_ids (int | Sequence[int] | None): Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only files in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only files in the specified data set(s) with this external id / these external ids. - labels (LabelFilter | None): Return only the files matching the specified label filter(s). - geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. - source (str | None): The source of this event. 
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range. - source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range. - uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - external_id_prefix (str | None): External Id provided by client. Should be unique within the project. - directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client. - uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. - limit (int | None): Max number of files to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + name: Name of the file. + mime_type: File type. E.g. text/plain, application/pdf, .. + metadata: Custom, application specific metadata. String key -> String value + asset_ids: Only include files that reference these specific asset IDs. + asset_external_ids: No description. + asset_subtree_ids: Only include files that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include files that have a related asset in a subtree rooted at any of these assetExternalIds. 
If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only files in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only files in the specified data set(s) with this external id / these external ids. + labels: Return only the files matching the specified label filter(s). + geo_location: Only include files matching the specified geographic relation. + source: The source of this file. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + source_created_time: Filter for files where the sourceCreatedTime field has been set and is within the specified range. + source_modified_time: Filter for files where the sourceModifiedTime field has been set and is within the specified range. + uploaded_time: Range between two timestamps + external_id_prefix: External Id provided by client. Should be unique within the project. + directory_prefix: Filter by this (case-sensitive) prefix for the directory provided by the client. + uploaded: Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. + limit: Max number of files to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). Returns: - FileMetadataList: The requested files. + The requested files.
Examples: diff --git a/cognite/client/_sync_api/functions/__init__.py b/cognite/client/_sync_api/functions/__init__.py index 3dbbdad04d..a494e3c570 100644 --- a/cognite/client/_sync_api/functions/__init__.py +++ b/cognite/client/_sync_api/functions/__init__.py @@ -1,6 +1,6 @@ """ =============================================================================== -0f8d2e651991f2abd978834235270f7f +060adb2637439b28e4ec720e7b9f0b51 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -89,18 +89,18 @@ def __call__( Iterate over functions. Args: - chunk_size (int | None): Number of functions to yield per chunk. Defaults to yielding functions one by one. - name (str | None): The name of the function. - owner (str | None): Owner of the function. - file_id (int | None): The file ID of the zip-file used to create the function. - status (FunctionStatus | None): Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"]. - external_id_prefix (str | None): External ID prefix to filter on. - created_time (dict[Literal['min', 'max'], int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - metadata (dict[str, str] | None): No description. - limit (int | None): Maximum number of functions to return. Defaults to yielding all functions. + chunk_size: Number of functions to yield per chunk. Defaults to yielding functions one by one. + name: The name of the function. + owner: Owner of the function. + file_id: The file ID of the zip-file used to create the function. + status: Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"]. + external_id_prefix: External ID prefix to filter on. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + metadata: No description. 
+ limit: Maximum number of functions to return. Defaults to yielding all functions. Yields: - Function | FunctionList: An iterator over functions. + An iterator over functions. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.functions( @@ -155,28 +155,27 @@ def create( For help with troubleshooting, please see `this page. `_ Args: - name (str | FunctionWrite): The name of the function or a FunctionWrite object. If a FunctionWrite - object is passed, all other arguments are ignored. - folder (str | None): Path to the folder where the function source code is located. - file_id (int | None): File ID of the code uploaded to the Files API. - function_path (str): Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on POSIX path format. - function_handle (FunctionHandle | None): Reference to a function object, which must be named `handle`. - external_id (str | None): External id of the function. - description (str | None): Description of the function. - owner (str | None): Owner of this function. Typically used to know who created it. - secrets (dict[str, str] | None): Additional secrets as key/value pairs. These can e.g. password to simulators or other data sources. Keys must be lowercase characters, numbers or dashes (-) and at most 15 characters. You can create at most 30 secrets, all keys must be unique. - env_vars (dict[str, str] | None): Environment variables as key/value pairs. Keys can contain only letters, numbers or the underscore character. You can create at most 100 environment variables. - cpu (float | None): Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. - memory (float | None): Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. 
On Azure, only the default value is used. - runtime (RunTime | None): The function runtime. Valid values are ["py310", "py311", "py312", `None`], and `None` translates to the API default which will change over time. The runtime "py312" resolves to the latest version of the Python 3.12 series. - metadata (dict[str, str] | None): Metadata for the function as key/value pairs. Key & values can be at most 32, 512 characters long respectively. You can have at the most 16 key-value pairs, with a maximum size of 512 bytes. - index_url (str | None): Index URL for Python Package Manager to use. Be aware of the intrinsic security implications of using the `index_url` option. `More information can be found on official docs, `_ - extra_index_urls (list[str] | None): Extra Index URLs for Python Package Manager to use. Be aware of the intrinsic security implications of using the `extra_index_urls` option. `More information can be found on official docs, `_ - skip_folder_validation (bool): When creating a function using the 'folder' argument, pass True to skip the extra validation step that attempts to import the module. Skipping can be useful when your function requires several heavy packages to already be installed locally. Defaults to False. - data_set_id (int | None): Data set to upload the function code to. Note: Does not affect the function itself. + name: The name of the function or a FunctionWrite object. If a FunctionWrite object is passed, all other arguments are ignored. + folder: Path to the folder where the function source code is located. + file_id: File ID of the code uploaded to the Files API. + function_path: Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on POSIX path format. + function_handle: Reference to a function object, which must be named `handle`. + external_id: External id of the function. + description: Description of the function. + owner: Owner of this function. 
Typically used to know who created it. + secrets: Additional secrets as key/value pairs. These can e.g. password to simulators or other data sources. Keys must be lowercase characters, numbers or dashes (-) and at most 15 characters. You can create at most 30 secrets, all keys must be unique. + env_vars: Environment variables as key/value pairs. Keys can contain only letters, numbers or the underscore character. You can create at most 100 environment variables. + cpu: Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + memory: Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + runtime: The function runtime. Valid values are ["py310", "py311", "py312", `None`], and `None` translates to the API default which will change over time. The runtime "py312" resolves to the latest version of the Python 3.12 series. + metadata: Metadata for the function as key/value pairs. Key & values can be at most 32, 512 characters long respectively. You can have at the most 16 key-value pairs, with a maximum size of 512 bytes. + index_url: Index URL for Python Package Manager to use. Be aware of the intrinsic security implications of using the `index_url` option. `More information can be found on official docs, `_ + extra_index_urls: Extra Index URLs for Python Package Manager to use. Be aware of the intrinsic security implications of using the `extra_index_urls` option. `More information can be found on official docs, `_ + skip_folder_validation: When creating a function using the 'folder' argument, pass True to skip the extra validation step that attempts to import the module. Skipping can be useful when your function requires several heavy packages to already be installed locally. Defaults to False. 
+ data_set_id: Data set to upload the function code to. Note: Does not affect the function itself. Returns: - Function: The created function. + The created function. Examples: @@ -245,8 +244,8 @@ def delete( `Delete one or more functions. `_ Args: - id (int | Sequence[int] | None): Id or list of ids. - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids. + id: Id or list of ids. + external_id: External ID or list of external ids. Example: @@ -274,17 +273,17 @@ def list( `List all functions. `_ Args: - name (str | None): The name of the function. - owner (str | None): Owner of the function. - file_id (int | None): The file ID of the zip-file used to create the function. - status (FunctionStatus | None): Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"]. - external_id_prefix (str | None): External ID prefix to filter on. - created_time (dict[Literal['min', 'max'], int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32, value 512 characters, up to 16 key-value pairs. Maximum size of entire metadata is 4096 bytes. - limit (int | None): Maximum number of functions to return. Pass in -1, float('inf') or None to list all. + name: The name of the function. + owner: Owner of the function. + file_id: The file ID of the zip-file used to create the function. + status: Status of the function. Possible values: ["Queued", "Deploying", "Ready", "Failed"]. + external_id_prefix: External ID prefix to filter on. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32, value 512 characters, up to 16 key-value pairs. 
Maximum size of entire metadata is 4096 bytes. + limit: Maximum number of functions to return. Pass in -1, float('inf') or None to list all. Returns: - FunctionList: List of functions + List of functions Example: @@ -313,11 +312,11 @@ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Fun `Retrieve a single function by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - Function | None: Requested function or None if it does not exist. + Requested function or None if it does not exist. Examples: @@ -344,12 +343,12 @@ def retrieve_multiple( `Retrieve multiple functions by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - FunctionList: The requested functions. + The requested functions. Examples: @@ -382,17 +381,17 @@ def call( `Call a function by its ID or external ID. `_. Args: - id (int | None): ID - external_id (str | None): External ID - data (dict[str, object] | None): Input data to the function (JSON serializable). This data is passed deserialized into the function through one of the arguments called data. **WARNING:** Secrets or other confidential information should not be passed via this argument. There is a dedicated `secrets` argument in FunctionsAPI.create() for this purpose.' - wait (bool): Wait until the function call is finished. Defaults to True. - nonce (str | None): Nonce retrieved from sessions API when creating a session. This will be used to bind the session before executing the function. If not provided, a new session will be created based on the client credentials. 
+ id: ID + external_id: External ID + data: Input data to the function (JSON serializable). This data is passed deserialized into the function through one of the arguments called data. **WARNING:** Secrets or other confidential information should not be passed via this argument. There is a dedicated `secrets` argument in FunctionsAPI.create() for this purpose.' + wait: Wait until the function call is finished. Defaults to True. + nonce: Nonce retrieved from sessions API when creating a session. This will be used to bind the session before executing the function. If not provided, a new session will be created based on the client credentials. Tip: You can create a session via the Sessions API, using the client.iam.session.create() method. Returns: - FunctionCall: A function call object. + A function call object. Examples: @@ -417,7 +416,7 @@ def limits(self) -> FunctionsLimits: `Get service limits. `_. Returns: - FunctionsLimits: A function limits object. + A function limits object. Examples: @@ -438,7 +437,7 @@ def activate(self) -> FunctionsStatus: May take some time to take effect (hours). Returns: - FunctionsStatus: A function activation status. + A function activation status. Examples: @@ -456,7 +455,7 @@ def status(self) -> FunctionsStatus: `Functions activation status for the Project. `_. Returns: - FunctionsStatus: A function activation status. + A function activation status. Examples: diff --git a/cognite/client/_sync_api/functions/calls.py b/cognite/client/_sync_api/functions/calls.py index 7eda9684b5..f576afda24 100644 --- a/cognite/client/_sync_api/functions/calls.py +++ b/cognite/client/_sync_api/functions/calls.py @@ -1,6 +1,6 @@ """ =============================================================================== -8cd3251711aec9ad29041addaebb0730 +16313d22e1182f5949139884fbbd2ad7 This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -34,16 +34,16 @@ def list( `List all calls associated with a specific function id. `_ Either function_id or function_external_id must be specified. Args: - function_id (int | None): ID of the function on which the calls were made. - function_external_id (str | None): External ID of the function on which the calls were made. - status (str | None): Status of the call. Possible values ["Running", "Failed", "Completed", "Timeout"]. - schedule_id (int | None): Schedule id from which the call belongs (if any). - start_time (dict[str, int] | None): Start time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. - end_time (dict[str, int] | None): End time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. - limit (int | None): Maximum number of function calls to list. Pass in -1, float('inf') or None to list all Function Calls. + function_id: ID of the function on which the calls were made. + function_external_id: External ID of the function on which the calls were made. + status: Status of the call. Possible values ["Running", "Failed", "Completed", "Timeout"]. + schedule_id: Schedule id from which the call belongs (if any). + start_time: Start time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. + end_time: End time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit: Maximum number of function calls to list. Pass in -1, float('inf') or None to list all Function Calls. Returns: - FunctionCallList: List of function calls + List of function calls Examples: @@ -78,12 +78,12 @@ def retrieve( `Retrieve a single function call by id. `_ Args: - call_id (int): ID of the call. - function_id (int | None): ID of the function on which the call was made. 
- function_external_id (str | None): External ID of the function on which the call was made. + call_id: ID of the call. + function_id: ID of the function on which the call was made. + function_external_id: External ID of the function on which the call was made. Returns: - FunctionCall | None: Requested function call or None if either call ID or function identifier is not found. + Requested function call or None if either call ID or function identifier is not found. Examples: @@ -112,12 +112,12 @@ def get_response( `Retrieve the response from a function call. `_ Args: - call_id (int): ID of the call. - function_id (int | None): ID of the function on which the call was made. - function_external_id (str | None): External ID of the function on which the call was made. + call_id: ID of the call. + function_id: ID of the function on which the call was made. + function_external_id: External ID of the function on which the call was made. Returns: - dict[str, object] | None: Response from the function call. + Response from the function call. Examples: @@ -146,12 +146,12 @@ def get_logs( `Retrieve logs for function call. `_ Args: - call_id (int): ID of the call. - function_id (int | None): ID of the function on which the call was made. - function_external_id (str | None): External ID of the function on which the call was made. + call_id: ID of the call. + function_id: ID of the function on which the call was made. + function_external_id: External ID of the function on which the call was made. Returns: - FunctionCallLog: Log for the function call. + Log for the function call. 
Examples: diff --git a/cognite/client/_sync_api/functions/schedules.py b/cognite/client/_sync_api/functions/schedules.py index 4046b82781..afe0fa961a 100644 --- a/cognite/client/_sync_api/functions/schedules.py +++ b/cognite/client/_sync_api/functions/schedules.py @@ -1,6 +1,6 @@ """ =============================================================================== -c224295f9172ecb603e6c8da256ce938 +f3bfae368b7b5c9549e261eb691da2cb This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -67,16 +67,16 @@ def __call__( Iterate over function schedules Args: - chunk_size (int | None): The number of schedules to return in each chunk. Defaults to yielding one schedule a time. - name (str | None): Name of the function schedule. - function_id (int | None): ID of the function the schedules are linked to. - function_external_id (str | None): External ID of the function the schedules are linked to. - created_time (dict[str, int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - cron_expression (str | None): Cron expression. - limit (int | None): Maximum schedules to return. Defaults to return all schedules. + chunk_size: The number of schedules to return in each chunk. Defaults to yielding one schedule a time. + name: Name of the function schedule. + function_id: ID of the function the schedules are linked to. + function_external_id: External ID of the function the schedules are linked to. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + cron_expression: Cron expression. + limit: Maximum schedules to return. Defaults to return all schedules. Yields: - FunctionSchedule | FunctionSchedulesList: Function schedules. + Function schedules. 
""" # noqa: DOC404 yield from SyncIterator( self.__async_client.functions.schedules( @@ -103,11 +103,11 @@ def retrieve( `Retrieve a single function schedule by id. `_ Args: - id (int | Sequence[int]): Schedule ID - ignore_unknown_ids (bool): Ignore IDs that are not found rather than throw an exception. + id: Schedule ID + ignore_unknown_ids: Ignore IDs that are not found rather than throw an exception. Returns: - FunctionSchedule | None | FunctionSchedulesList: Requested function schedule or None if not found. + Requested function schedule or None if not found. Examples: @@ -133,15 +133,15 @@ def list( `List all schedules associated with a specific project. `_ Args: - name (str | None): Name of the function schedule. - function_id (int | None): ID of the function the schedules are linked to. - function_external_id (str | None): External ID of the function the schedules are linked to. - created_time (dict[str, int] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - cron_expression (str | None): Cron expression. - limit (int | None): Maximum number of schedules to list. Pass in -1, float('inf') or None to list all. + name: Name of the function schedule. + function_id: ID of the function the schedules are linked to. + function_external_id: External ID of the function the schedules are linked to. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + cron_expression: Cron expression. + limit: Maximum number of schedules to list. Pass in -1, float('inf') or None to list all. Returns: - FunctionSchedulesList: List of function schedules + List of function schedules Examples: @@ -182,18 +182,16 @@ def create( `Create a schedule associated with a specific project. `_ Args: - name (str | FunctionScheduleWrite): Name of the schedule or FunctionSchedule object. 
If a function schedule object is passed, the other arguments are ignored except for the client_credentials argument. - cron_expression (str | None): Cron expression. - function_id (int | None): Id of the function to attach the schedule to. - function_external_id (str | None): (DEPRECATED) External id of the function to attach the schedule to. - Note: Will be automatically converted to (internal) ID, as schedules must be bound to an ID. - client_credentials (dict[str, str] | ClientCredentials | None): Instance of ClientCredentials - or a dictionary containing client credentials: 'client_id' and 'client_secret'. - description (str | None): Description of the schedule. - data (dict[str, object] | None): Data to be passed to the scheduled run. + name: Name of the schedule or FunctionSchedule object. If a function schedule object is passed, the other arguments are ignored except for the client_credentials argument. + cron_expression: Cron expression. + function_id: Id of the function to attach the schedule to. + function_external_id: (DEPRECATED) External id of the function to attach the schedule to. Note: Will be automatically converted to (internal) ID, as schedules must be bound to an ID. + client_credentials: Instance of ClientCredentials or a dictionary containing client credentials: 'client_id' and 'client_secret'. + description: Description of the schedule. + data: Data to be passed to the scheduled run. Returns: - FunctionSchedule: Created function schedule. + Created function schedule. Note: There are several ways to authenticate the function schedule — the order of priority is as follows: @@ -270,7 +268,7 @@ def delete(self, id: int) -> None: `Delete a schedule associated with a specific project. `_ Args: - id (int): Id of the schedule + id: Id of the schedule Examples: @@ -288,10 +286,10 @@ def get_input_data(self, id: int) -> dict[str, object] | None: `Retrieve the input data to the associated function. 
`_ Args: - id (int): Id of the schedule + id: Id of the schedule Returns: - dict[str, object] | None: Input data to the associated function or None if not set. This data is passed deserialized into the function through the data argument. + Input data to the associated function or None if not set. This data is passed deserialized into the function through the data argument. Examples: diff --git a/cognite/client/_sync_api/geospatial.py b/cognite/client/_sync_api/geospatial.py index 22cb624499..2b52748c47 100644 --- a/cognite/client/_sync_api/geospatial.py +++ b/cognite/client/_sync_api/geospatial.py @@ -1,6 +1,6 @@ """ =============================================================================== -73212a8abaf0b44017f929a3d190bc8a +ddd906b7c138e3c75c04f8648c4438c8 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -58,10 +58,10 @@ def create_feature_types( Args: - feature_type (FeatureType | FeatureTypeWrite | Sequence[FeatureType] | Sequence[FeatureTypeWrite]): feature type definition or list of feature type definitions to create. + feature_type: feature type definition or list of feature type definitions to create. 
Returns: - FeatureType | FeatureTypeList: Created feature type definition(s) + Created feature type definition(s) Examples: @@ -89,8 +89,8 @@ def delete_feature_types(self, external_id: str | SequenceNotStr[str], recursive Args: - external_id (str | SequenceNotStr[str]): External ID or list of external ids - recursive (bool): if `true` the features will also be dropped + external_id: External ID or list of external ids + recursive: if `true` the features will also be dropped Examples: @@ -111,7 +111,7 @@ def list_feature_types(self) -> FeatureTypeList: Returns: - FeatureTypeList: List of feature types + List of feature types Examples: @@ -137,10 +137,10 @@ def retrieve_feature_types(self, external_id: str | list[str]) -> FeatureType | Args: - external_id (str | list[str]): External ID + external_id: External ID Returns: - FeatureType | FeatureTypeList: Requested Type or None if it does not exist. + Requested Type or None if it does not exist. Examples: @@ -159,10 +159,10 @@ def patch_feature_types(self, patch: FeatureTypePatch | Sequence[FeatureTypePatc Args: - patch (FeatureTypePatch | Sequence[FeatureTypePatch]): the patch to apply + patch: the patch to apply Returns: - FeatureTypeList: The patched feature types. + The patched feature types. Examples: @@ -226,13 +226,13 @@ def create_features( Args: - feature_type_external_id (str): Feature type definition for the features to create. - feature (Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite] | FeatureList | FeatureWriteList): one feature or a list of features to create or a FeatureList object - allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. 
- chunk_size (int | None): maximum number of items in a single request to the api + feature_type_external_id: Feature type definition for the features to create. + feature: one feature or a list of features to create or a FeatureList object + allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + chunk_size: maximum number of items in a single request to the api Returns: - Feature | FeatureList: Created features + Created features Examples: @@ -278,8 +278,8 @@ def delete_features( Args: - feature_type_external_id (str): No description. - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids + feature_type_external_id: No description. + external_id: External ID or list of external ids Examples: @@ -317,12 +317,12 @@ def retrieve_features( Args: - feature_type_external_id (str): No description. - external_id (str | list[str]): External ID or list of external ids - properties (dict[str, Any] | None): the output property selection + feature_type_external_id: No description. + external_id: External ID or list of external ids + properties: the output property selection Returns: - FeatureList | Feature: Requested features or None if it does not exist. + Requested features or None if it does not exist. Examples: @@ -372,13 +372,13 @@ def update_features( Args: - feature_type_external_id (str): No description. - feature (Feature | FeatureWrite | Sequence[Feature] | Sequence[FeatureWrite]): feature or list of features. - allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. 
When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. - chunk_size (int | None): maximum number of items in a single request to the api + feature_type_external_id: No description. + feature: feature or list of features. + allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + chunk_size: maximum number of items in a single request to the api Returns: - Feature | FeatureList: Updated features + Updated features Examples: @@ -420,14 +420,14 @@ def list_features( This method allows to filter all features. Args: - feature_type_external_id (str): the feature type to list features for - filter (dict[str, Any] | None): the list filter - properties (dict[str, Any] | None): the output property selection - limit (int | None): Maximum number of features to return. Defaults to 25. Set to -1, float("inf") or None to return all features. - allow_crs_transformation (bool): If true, then input geometries if existing in the filter will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + feature_type_external_id: the feature type to list features for + filter: the list filter + properties: the output property selection + limit: Maximum number of features to return. Defaults to 25. Set to -1, float("inf") or None to return all features. 
+ allow_crs_transformation: If true, then input geometries if existing in the filter will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. Returns: - FeatureList: The filtered features + The filtered features Examples: @@ -501,16 +501,16 @@ def search_features( If you need to return more than 1000 items, use the `stream_features(...)` method instead. Args: - feature_type_external_id (str): The feature type to search for - filter (dict[str, Any] | None): The search filter - properties (dict[str, Any] | None): The output property selection - limit (int): Maximum number of results - order_by (Sequence[OrderSpec] | None): The order specification - allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. - allow_dimensionality_mismatch (bool): Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False. + feature_type_external_id: The feature type to search for + filter: The search filter + properties: The output property selection + limit: Maximum number of results + order_by: The order specification + allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. 
+ allow_dimensionality_mismatch: Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False. Returns: - FeatureList: the filtered features + the filtered features Examples: @@ -626,14 +626,14 @@ def stream_features( If you need to order the results, use the `search_features(...)` method instead. Args: - feature_type_external_id (str): the feature type to search for - filter (dict[str, Any] | None): the search filter - properties (dict[str, Any] | None): the output property selection - allow_crs_transformation (bool): If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. - allow_dimensionality_mismatch (bool): Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False. + feature_type_external_id: the feature type to search for + filter: the search filter + properties: the output property selection + allow_crs_transformation: If true, then input geometries will be transformed into the Coordinate Reference System defined in the feature type specification. When it is false, then requests with geometries in Coordinate Reference System different from the ones defined in the feature type will result in CogniteAPIError exception. + allow_dimensionality_mismatch: Indicating if the spatial filter operators allow input geometries with a different dimensionality than the properties they are applied to. Defaults to False. 
Yields: - Feature: a generator for the filtered features + a generator for the filtered features Examples: @@ -686,14 +686,14 @@ def aggregate_features( Args: - feature_type_external_id (str): the feature type to filter features from - filter (dict[str, Any] | None): the search filter - group_by (SequenceNotStr[str] | None): list of properties to group by with - order_by (Sequence[OrderSpec] | None): the order specification - output (dict[str, Any] | None): the aggregate output + feature_type_external_id: the feature type to filter features from + filter: the search filter + group_by: list of properties to group by with + order_by: the order specification + output: the aggregate output Returns: - FeatureAggregateList: the filtered features + the filtered features Examples: @@ -734,10 +734,10 @@ def get_coordinate_reference_systems(self, srids: int | Sequence[int]) -> Coordi Args: - srids (int | Sequence[int]): (Union[int, Sequence[int]]): SRID or list of SRIDs + srids: SRID or list of SRIDs Returns: - CoordinateReferenceSystemList: Requested CRSs. + Requested CRSs. Examples: @@ -756,10 +756,10 @@ def list_coordinate_reference_systems(self, only_custom: bool = False) -> Coordi Args: - only_custom (bool): list only custom CRSs or not + only_custom: list only custom CRSs or not Returns: - CoordinateReferenceSystemList: list of CRSs. + list of CRSs. Examples: @@ -784,10 +784,10 @@ def create_coordinate_reference_systems( Args: - crs (CoordinateReferenceSystem | CoordinateReferenceSystemWrite | Sequence[CoordinateReferenceSystem] | Sequence[CoordinateReferenceSystemWrite]): a CoordinateReferenceSystem or a list of CoordinateReferenceSystem + crs: a CoordinateReferenceSystem or a list of CoordinateReferenceSystem Returns: - CoordinateReferenceSystemList: list of CRSs. + list of CRSs. 
Examples: @@ -840,7 +840,7 @@ def delete_coordinate_reference_systems(self, srids: int | Sequence[int]) -> Non Args: - srids (int | Sequence[int]): (Union[int, Sequence[int]]): SRID or list of SRIDs + srids: SRID or list of SRIDs Examples: @@ -869,18 +869,18 @@ def put_raster( `Put raster ` Args: - feature_type_external_id (str): No description. - feature_external_id (str): one feature or a list of features to create - raster_property_name (str): the raster property name - raster_format (str): the raster input format - raster_srid (int): the associated SRID for the raster - file (str | Path): the path to the file of the raster - allow_crs_transformation (bool): When the parameter is false, requests with rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code. - raster_scale_x (float | None): the X component of the pixel width in units of coordinate reference system - raster_scale_y (float | None): the Y component of the pixel height in units of coordinate reference system + feature_type_external_id: No description. + feature_external_id: one feature or a list of features to create + raster_property_name: the raster property name + raster_format: the raster input format + raster_srid: the associated SRID for the raster + file: the path to the file of the raster + allow_crs_transformation: When the parameter is false, requests with rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code. 
+ raster_scale_x: the X component of the pixel width in units of coordinate reference system + raster_scale_y: the Y component of the pixel height in units of coordinate reference system Returns: - RasterMetadata: the raster metadata if it was ingested successfully + the raster metadata if it was ingested successfully Examples: @@ -914,9 +914,9 @@ def delete_raster(self, feature_type_external_id: str, feature_external_id: str, `Delete raster ` Args: - feature_type_external_id (str): No description. - feature_external_id (str): one feature or a list of features to create - raster_property_name (str): the raster property name + feature_type_external_id: No description. + feature_external_id: one feature or a list of features to create + raster_property_name: the raster property name Examples: @@ -954,18 +954,18 @@ def get_raster( `Get raster ` Args: - feature_type_external_id (str): Feature type definition for the features to create. - feature_external_id (str): one feature or a list of features to create - raster_property_name (str): the raster property name - raster_format (str): the raster output format - raster_options (dict[str, Any] | None): GDAL raster creation key-value options - raster_srid (int | None): the SRID for the output raster - raster_scale_x (float | None): the X component of the output pixel width in units of coordinate reference system - raster_scale_y (float | None): the Y component of the output pixel height in units of coordinate reference system - allow_crs_transformation (bool): When the parameter is false, requests with output rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code. + feature_type_external_id: Feature type definition for the features to create. 
+ feature_external_id: one feature or a list of features to create + raster_property_name: the raster property name + raster_format: the raster output format + raster_options: GDAL raster creation key-value options + raster_srid: the SRID for the output raster + raster_scale_x: the X component of the output pixel width in units of coordinate reference system + raster_scale_y: the Y component of the output pixel height in units of coordinate reference system + allow_crs_transformation: When the parameter is false, requests with output rasters in Coordinate Reference System different from the one defined in the feature type will result in bad request response code. Returns: - bytes: the raster data + the raster data Examples: @@ -999,10 +999,10 @@ def compute(self, output: dict[str, GeospatialComputeFunction]) -> GeospatialCom `Compute ` Args: - output (dict[str, GeospatialComputeFunction]): No description. + output: No description. Returns: - GeospatialComputedResponse: Mapping of keys to computed items. + Mapping of keys to computed items. Examples: diff --git a/cognite/client/_sync_api/hosted_extractors/destinations.py b/cognite/client/_sync_api/hosted_extractors/destinations.py index efe400aff0..c8edb49c55 100644 --- a/cognite/client/_sync_api/hosted_extractors/destinations.py +++ b/cognite/client/_sync_api/hosted_extractors/destinations.py @@ -1,6 +1,6 @@ """ =============================================================================== -e357a5685b77fbbefe2a7f72f8929ba0 +cf1dc62492108cfa8156cfe85c1f0eb3 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -47,11 +47,11 @@ def __call__( Fetches Destination as they are iterated over, so you keep a limited number of destinations in memory. Args: - chunk_size (int | None): Number of Destinations to return in each chunk. Defaults to yielding one Destination a time. 
- limit (int | None): Maximum number of Destination to return. Defaults to returning all items. + chunk_size: Number of Destinations to return in each chunk. Defaults to yielding one Destination a time. + limit: Maximum number of Destination to return. Defaults to returning all items. Yields: - Destination | DestinationList: yields Destination one by one if chunk_size is not specified, else DestinationList objects. + yields Destination one by one if chunk_size is not specified, else DestinationList objects. """ # noqa: DOC404 yield from SyncIterator(self.__async_client.hosted_extractors.destinations(chunk_size=chunk_size, limit=limit)) @@ -68,12 +68,11 @@ def retrieve( `Retrieve one or more destinations. `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found - + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found Returns: - Destination | DestinationList: Requested destinations + Requested destinations Examples: @@ -99,9 +98,9 @@ def delete( `Delete one or more destsinations `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found - force (bool): Delete any jobs associated with each item. + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found + force: Delete any jobs associated with each item. Examples: @@ -129,10 +128,10 @@ def create(self, items: DestinationWrite | Sequence[DestinationWrite]) -> Destin `Create one or more destinations. `_ Args: - items (DestinationWrite | Sequence[DestinationWrite]): Destination(s) to create. + items: Destination(s) to create. 
Returns: - Destination | DestinationList: Created destination(s) + Created destination(s) Examples: @@ -170,11 +169,11 @@ def update( `Update one or more destinations. `_ Args: - items (DestinationWrite | DestinationUpdate | Sequence[DestinationWrite | DestinationUpdate]): Destination(s) to update. - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (DestinationWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + items: Destination(s) to update. + mode: How to update data when a non-update object is given (DestinationWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Destination | DestinationList: Updated destination(s) + Updated destination(s) Examples: @@ -194,10 +193,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DestinationList: `List destinations `_ Args: - limit (int | None): Maximum number of destinations to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of destinations to return. Defaults to 25. Set to -1, float("inf") or None to return all items. 
Returns: - DestinationList: List of requested destinations + List of requested destinations Examples: diff --git a/cognite/client/_sync_api/hosted_extractors/jobs.py b/cognite/client/_sync_api/hosted_extractors/jobs.py index c24a4b6f53..0f9d841aa6 100644 --- a/cognite/client/_sync_api/hosted_extractors/jobs.py +++ b/cognite/client/_sync_api/hosted_extractors/jobs.py @@ -1,6 +1,6 @@ """ =============================================================================== -bd17aab24c1acbcf37ae90452f8d16b4 +605e64e7f9ad12b42736af8c5cef2d05 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -47,11 +47,11 @@ def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> I Fetches jobs as they are iterated over, so you keep a limited number of jobs in memory. Args: - chunk_size (int | None): Number of jobs to return in each chunk. Defaults to yielding one job a time. - limit (int | None): Maximum number of jobs to return. Defaults to returning all items. + chunk_size: Number of jobs to return in each chunk. Defaults to yielding one job a time. + limit: Maximum number of jobs to return. Defaults to returning all items. Yields: - Job | JobList: yields Job one by one if chunk_size is not specified, else JobList objects. + yields Job one by one if chunk_size is not specified, else JobList objects. """ # noqa: DOC404 yield from SyncIterator(self.__async_client.hosted_extractors.jobs(chunk_size=chunk_size, limit=limit)) # type: ignore [misc] @@ -68,11 +68,11 @@ def retrieve( `Retrieve one or more jobs. `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the job type. - ignore_unknown_ids (bool): Ignore external IDs that are not found + external_ids: The external ID provided by the client. Must be unique for the job type. 
+ ignore_unknown_ids: Ignore external IDs that are not found Returns: - Job | None | JobList: Requested jobs + Requested jobs Examples: @@ -96,8 +96,8 @@ def delete(self, external_ids: str | SequenceNotStr[str], ignore_unknown_ids: bo `Delete one or more jobs `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found Examples: Delete jobs by external id: @@ -124,10 +124,10 @@ def create(self, items: JobWrite | Sequence[JobWrite]) -> Job | JobList: `Create one or more jobs. `_ Args: - items (JobWrite | Sequence[JobWrite]): Job(s) to create. + items: Job(s) to create. Returns: - Job | JobList: Created job(s) + Created job(s) Examples: @@ -165,11 +165,11 @@ def update( `Update one or more jobs. `_ Args: - items (JobWrite | JobUpdate | Sequence[JobWrite | JobUpdate]): Job(s) to update. - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (JobWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + items: Job(s) to update. + mode: How to update data when a non-update object is given (JobWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. 
Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Job | JobList: Updated job(s) + Updated job(s) Examples: @@ -189,10 +189,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> JobList: `List jobs `_ Args: - limit (int | None): Maximum number of jobs to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of jobs to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - JobList: List of requested jobs + List of requested jobs Examples: @@ -226,13 +226,13 @@ def list_logs( `List job logs. `_ Args: - job (str | None): Require returned logs to belong to the job given by this external ID. - source (str | None): Require returned logs to belong to the any job with source given by this external ID. - destination (str | None): Require returned logs to belong to the any job with destination given by this external ID. - limit (int | None): Maximum number of logs to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + job: Require returned logs to belong to the job given by this external ID. + source: Require returned logs to belong to the any job with source given by this external ID. + destination: Require returned logs to belong to the any job with destination given by this external ID. + limit: Maximum number of logs to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - JobLogsList: List of requested job logs + List of requested job logs Examples: @@ -260,13 +260,13 @@ def list_metrics( `List job metrics. `_ Args: - job (str | None): Require returned metrics to belong to the job given by this external ID. - source (str | None): Require returned metrics to belong to the any job with source given by this external ID. 
- destination (str | None): Require returned metrics to belong to the any job with destination given by this external ID. - limit (int | None): Maximum number of metrics to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + job: Require returned metrics to belong to the job given by this external ID. + source: Require returned metrics to belong to the any job with source given by this external ID. + destination: Require returned metrics to belong to the any job with destination given by this external ID. + limit: Maximum number of metrics to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - JobMetricsList: List of requested job metrics + List of requested job metrics Examples: diff --git a/cognite/client/_sync_api/hosted_extractors/mappings.py b/cognite/client/_sync_api/hosted_extractors/mappings.py index 0585790ad4..635d955431 100644 --- a/cognite/client/_sync_api/hosted_extractors/mappings.py +++ b/cognite/client/_sync_api/hosted_extractors/mappings.py @@ -1,6 +1,6 @@ """ =============================================================================== -d93326405938df60470510da7470796c +0d8bc2bb1d8d3a862a2f28e23af3073a This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -42,11 +42,11 @@ def __call__( Fetches Mapping as they are iterated over, so you keep a limited number of mappings in memory. Args: - chunk_size (int | None): Number of Mappings to return in each chunk. Defaults to yielding one mapping at a time. - limit (int | None): Maximum number of mappings to return. Defaults to returning all items. + chunk_size: Number of Mappings to return in each chunk. Defaults to yielding one mapping at a time. + limit: Maximum number of mappings to return. Defaults to returning all items. 
Yields: - Mapping | MappingList: yields Mapping one by one if chunk_size is not specified, else MappingList objects. + yields Mapping one by one if chunk_size is not specified, else MappingList objects. """ # noqa: DOC404 yield from SyncIterator(self.__async_client.hosted_extractors.mappings(chunk_size=chunk_size, limit=limit)) @@ -63,12 +63,11 @@ def retrieve( `Retrieve one or more mappings. `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found - + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found Returns: - Mapping | MappingList: Requested mappings + Requested mappings Examples: @@ -94,9 +93,9 @@ def delete( `Delete one or more mappings `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found - force (bool): Delete any jobs associated with each item. + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found + force: Delete any jobs associated with each item. Examples: @@ -124,10 +123,10 @@ def create(self, items: MappingWrite | Sequence[MappingWrite]) -> Mapping | Mapp `Create one or more mappings. `_ Args: - items (MappingWrite | Sequence[MappingWrite]): Mapping(s) to create. + items: Mapping(s) to create. Returns: - Mapping | MappingList: Created mapping(s) + Created mapping(s) Examples: @@ -155,10 +154,10 @@ def update( `Update one or more mappings. `_ Args: - items (MappingWrite | MappingUpdate | Sequence[MappingWrite | MappingUpdate]): Mapping(s) to update. + items: Mapping(s) to update. 
Returns: - Mapping | MappingList: Updated mapping(s) + Updated mapping(s) Examples: @@ -178,10 +177,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> MappingList: `List mappings `_ Args: - limit (int | None): Maximum number of mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - MappingList: List of requested mappings + List of requested mappings Examples: diff --git a/cognite/client/_sync_api/hosted_extractors/sources.py b/cognite/client/_sync_api/hosted_extractors/sources.py index 8c15bfa0e2..95d834259f 100644 --- a/cognite/client/_sync_api/hosted_extractors/sources.py +++ b/cognite/client/_sync_api/hosted_extractors/sources.py @@ -1,6 +1,6 @@ """ =============================================================================== -2d35b7bf30842fb0d4d5423ee14f3110 +5eacaa7290d67a35f580b40c4caf2cbe This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -42,11 +42,11 @@ def __call__( Fetches sources as they are iterated over, so you keep a limited number of sources in memory. Args: - chunk_size (int | None): Number of sources to return in each chunk. Defaults to yielding one source a time. - limit (int | None): Maximum number of sources to return. Defaults to returning all items. + chunk_size: Number of sources to return in each chunk. Defaults to yielding one source a time. + limit: Maximum number of sources to return. Defaults to returning all items. Yields: - Source | SourceList: yields Source one by one if chunk_size is not specified, else SourceList objects. + yields Source one by one if chunk_size is not specified, else SourceList objects. 
""" # noqa: DOC404 yield from SyncIterator(self.__async_client.hosted_extractors.sources(chunk_size=chunk_size, limit=limit)) # type: ignore [misc] @@ -63,11 +63,11 @@ def retrieve( `Retrieve one or more sources. `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception. Returns: - Source | SourceList: Requested sources + Requested sources Examples: @@ -93,9 +93,9 @@ def delete( `Delete one or more sources `_ Args: - external_ids (str | SequenceNotStr[str]): The external ID provided by the client. Must be unique for the resource type. - ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. - force (bool): Delete any jobs associated with each item. + external_ids: The external ID provided by the client. Must be unique for the resource type. + ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception. + force: Delete any jobs associated with each item. Examples: Delete sources by id: @@ -122,10 +122,10 @@ def create(self, items: SourceWrite | Sequence[SourceWrite]) -> Source | SourceL `Create one or more sources. `_ Args: - items (SourceWrite | Sequence[SourceWrite]): Source(s) to create. + items: Source(s) to create. Returns: - Source | SourceList: Created source(s) + Created source(s) Examples: @@ -163,11 +163,11 @@ def update( `Update one or more sources. `_ Args: - items (SourceWrite | SourceUpdate | Sequence[SourceWrite | SourceUpdate]): Source(s) to update. - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (SourceWrite). 
If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + items: Source(s) to update. + mode: How to update data when a non-update object is given (SourceWrite). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Source | SourceList: Updated source(s) + Updated source(s) Examples: @@ -187,10 +187,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SourceList: `List sources `_ Args: - limit (int | None): Maximum number of sources to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of sources to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - SourceList: List of requested sources + List of requested sources Examples: diff --git a/cognite/client/_sync_api/iam/__init__.py b/cognite/client/_sync_api/iam/__init__.py index 9fb0b09923..c922b0df08 100644 --- a/cognite/client/_sync_api/iam/__init__.py +++ b/cognite/client/_sync_api/iam/__init__.py @@ -1,6 +1,6 @@ """ =============================================================================== -dee7369c5fe919ffd146a7c16845acea +63e400e3d7338081833389aaaef854ee This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -9,6 +9,7 @@ from typing import TYPE_CHECKING +from cognite.client import AsyncCogniteClient from cognite.client._api.iam import ComparableCapability from cognite.client._sync_api.iam.groups import SyncGroupsAPI from cognite.client._sync_api.iam.security_categories import SyncSecurityCategoriesAPI @@ -54,14 +55,12 @@ def compare_capabilities( ``client.iam.verify_capabilities`` instead. Args: - existing_capabilities (ComparableCapability): List of existing capabilities. - desired_capabilities (ComparableCapability): List of wanted capabilities to check against existing. - project (str | None): If a ProjectCapability or ProjectCapabilityList is passed, we need to know which CDF project - to pull capabilities from (existing might be from several). If project is not passed, and ProjectCapabilityList - is used, it will be inferred from the AsyncCogniteClient used to call retrieve it via token/inspect. + existing_capabilities: List of existing capabilities. + desired_capabilities: List of wanted capabilities to check against existing. + project: If a ProjectCapability or ProjectCapabilityList is passed, we need to know which CDF project to pull capabilities from (existing might be from several). If project is not passed, and ProjectCapabilityList is used, it will be inferred from the AsyncCogniteClient used to call retrieve it via token/inspect. Returns: - list[Capability]: A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc. + A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc. Examples: @@ -112,10 +111,10 @@ def verify_capabilities(self, desired_capabilities: ComparableCapability) -> lis Helper method to compare your current capabilities with a set of desired capabilities and return any missing. 
Args: - desired_capabilities (ComparableCapability): List of desired capabilities to check against existing. + desired_capabilities: List of desired capabilities to check against existing. Returns: - list[Capability]: A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc. + A flattened list of the missing capabilities, meaning they each have exactly 1 action, 1 scope, 1 id etc. Examples: diff --git a/cognite/client/_sync_api/iam/groups.py b/cognite/client/_sync_api/iam/groups.py index 685c97c7ed..76335fac71 100644 --- a/cognite/client/_sync_api/iam/groups.py +++ b/cognite/client/_sync_api/iam/groups.py @@ -1,6 +1,6 @@ """ =============================================================================== -a9ee1fd39d27052b45b4fbf4d3240b4a +c8c735fa45de9c52b66ec5a35b2d5d16 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -28,10 +28,10 @@ def list(self, all: bool = False) -> GroupList: `List groups. `_ Args: - all (bool): Whether to get all groups, only available with the groups:list acl. + all: Whether to get all groups, only available with the groups:list acl. Returns: - GroupList: List of groups. + List of groups. Example: @@ -59,9 +59,9 @@ def create(self, group: Group | GroupWrite | Sequence[Group] | Sequence[GroupWri `Create one or more groups. `_ Args: - group (Group | GroupWrite | Sequence[Group] | Sequence[GroupWrite]): Group or list of groups to create. + group: Group or list of groups to create. Returns: - Group | GroupList: The created group(s). + The created group(s). Example: @@ -124,7 +124,7 @@ def delete(self, id: int | Sequence[int]) -> None: `Delete one or more groups. `_ Args: - id (int | Sequence[int]): ID or list of IDs of groups to delete. + id: ID or list of IDs of groups to delete. 
Example: diff --git a/cognite/client/_sync_api/iam/security_categories.py b/cognite/client/_sync_api/iam/security_categories.py index 0516f0d921..c2618eea63 100644 --- a/cognite/client/_sync_api/iam/security_categories.py +++ b/cognite/client/_sync_api/iam/security_categories.py @@ -1,6 +1,6 @@ """ =============================================================================== -fea2e0196dbe6a2804d937ee40ff61e9 +11b752b7383f113febe1ff2669789673 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -29,10 +29,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SecurityCategoryList: `List security categories. `_ Args: - limit (int | None): Max number of security categories to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Max number of security categories to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - SecurityCategoryList: List of security categories + List of security categories Example: @@ -64,10 +64,10 @@ def create( `Create one or more security categories. `_ Args: - security_category (SecurityCategory | SecurityCategoryWrite | Sequence[SecurityCategory] | Sequence[SecurityCategoryWrite]): Security category or list of categories to create. + security_category: Security category or list of categories to create. Returns: - SecurityCategory | SecurityCategoryList: The created security category or categories. + The created security category or categories. Example: @@ -87,7 +87,7 @@ def delete(self, id: int | Sequence[int]) -> None: `Delete one or more security categories. `_ Args: - id (int | Sequence[int]): ID or list of IDs of security categories to delete. + id: ID or list of IDs of security categories to delete. 
Example: diff --git a/cognite/client/_sync_api/iam/sessions.py b/cognite/client/_sync_api/iam/sessions.py index e2cb04770a..a037e09a98 100644 --- a/cognite/client/_sync_api/iam/sessions.py +++ b/cognite/client/_sync_api/iam/sessions.py @@ -1,6 +1,6 @@ """ =============================================================================== -312e9a2bb82dd0860e0c463ca8ef0a85 +8816a2865e5d5598efb02dc405b9124c This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -36,13 +36,8 @@ def create( `Create a session. `_ Args: - client_credentials (ClientCredentials | None): The client credentials to create the session. This is required - if session_type is set to 'CLIENT_CREDENTIALS'. - session_type (SessionType | Literal['DEFAULT']): The type of session to create. Can be - either 'CLIENT_CREDENTIALS', 'TOKEN_EXCHANGE', 'ONESHOT_TOKEN_EXCHANGE' or 'DEFAULT'. - Defaults to 'DEFAULT' which will use -this- AsyncCogniteClient object to create the session. - If this client was created using a token, 'TOKEN_EXCHANGE' will be used, and if - this client was created using client credentials, 'CLIENT_CREDENTIALS' will be used. + client_credentials: The client credentials to create the session. This is required if session_type is set to 'CLIENT_CREDENTIALS'. + session_type: The type of session to create. Can be either 'CLIENT_CREDENTIALS', 'TOKEN_EXCHANGE', 'ONESHOT_TOKEN_EXCHANGE' or 'DEFAULT'. Defaults to 'DEFAULT' which will use -this- AsyncCogniteClient object to create the session. If this client was created using a token, 'TOKEN_EXCHANGE' will be used, and if this client was created using client credentials, 'CLIENT_CREDENTIALS' will be used. Session Types: @@ -51,7 +46,7 @@ def create( * **one_shot_token_exchange**: Credentials for a session using one-shot token exchange to reuse the user's credentials. 
One-shot sessions are short-lived sessions that are not refreshed and do not require support for token exchange from the identity provider. Returns: - CreatedSession: The object with token inspection details. + The object with token inspection details. """ return run_sync( self.__async_client.iam.sessions.create(client_credentials=client_credentials, session_type=session_type) ) @@ -68,10 +63,10 @@ def revoke(self, id: int | Sequence[int]) -> Session | SessionList: `Revoke access to a session. Revocation of a session may in some cases take up to 1 hour to take effect. `_ Args: - id (int | Sequence[int]): Id or list of session ids + id: Id or list of session ids Returns: - Session | SessionList: List of revoked sessions. If the user does not have the sessionsAcl:LIST capability, then only the session IDs will be present in the response. + List of revoked sessions. If the user does not have the sessionsAcl:LIST capability, then only the session IDs will be present in the response. """ return run_sync(self.__async_client.iam.sessions.revoke(id=id)) @@ -88,10 +83,10 @@ def retrieve(self, id: int | Sequence[int]) -> Session | SessionList: `Retrieve sessions with given IDs. `_ The request will fail if any of the IDs does not belong to an existing session. Args: - id (int | Sequence[int]): Id or list of session ids + id: Id or list of session ids Returns: - Session | SessionList: Session or list of sessions. + Session or list of sessions. """ return run_sync(self.__async_client.iam.sessions.retrieve(id=id)) @@ -100,10 +95,10 @@ def list(self, status: SessionStatus | None = None, limit: int = DEFAULT_LIMIT_R `List all sessions in the current project. `_ Args: - status (SessionStatus | None): If given, only sessions with the given status are returned. - limit (int): Max number of sessions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + status: If given, only sessions with the given status are returned. + limit: Max number of sessions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. 
Returns: - SessionList: a list of sessions in the current project. + a list of sessions in the current project. """ return run_sync(self.__async_client.iam.sessions.list(status=status, limit=limit)) diff --git a/cognite/client/_sync_api/iam/token.py b/cognite/client/_sync_api/iam/token.py index 6e005a9210..79ac3fdbf1 100644 --- a/cognite/client/_sync_api/iam/token.py +++ b/cognite/client/_sync_api/iam/token.py @@ -1,6 +1,6 @@ """ =============================================================================== -d7f8ea05edb54a84534b0b464e2e5645 +984dd3457723af98e27974707ff19bed This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -26,7 +26,7 @@ def inspect(self) -> TokenInspection: Get details about which projects it belongs to and which capabilities are granted to it. Returns: - TokenInspection: The object with token inspection details. + The object with token inspection details. Example: diff --git a/cognite/client/_sync_api/labels.py b/cognite/client/_sync_api/labels.py index a96529cb23..f71355c696 100644 --- a/cognite/client/_sync_api/labels.py +++ b/cognite/client/_sync_api/labels.py @@ -1,6 +1,6 @@ """ =============================================================================== -005b968817547c1aa85158a1cefc7d9d +8b597fb38f38e9152233fd598a0b428c This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -63,15 +63,15 @@ def __call__( Iterate over Labels Args: - chunk_size (int | None): Number of Labels to return in each chunk. Defaults to yielding one Label a time. - name (str | None): returns the label definitions matching that name - external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified - limit (int | None): Maximum number of label definitions to return. Defaults return all labels. 
- data_set_ids (int | Sequence[int] | None): return only labels in the data sets with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): return only labels in the data sets with this external id / these external ids. + chunk_size: Number of Labels to return in each chunk. Defaults to yielding one Label a time. + name: returns the label definitions matching that name + external_id_prefix: filter label definitions with external ids starting with the prefix specified + limit: Maximum number of label definitions to return. Defaults return all labels. + data_set_ids: return only labels in the data sets with this id / these ids. + data_set_external_ids: return only labels in the data sets with this external id / these external ids. Yields: - LabelDefinition | LabelDefinitionList: yields Labels one by one or in chunks. + yields Labels one by one or in chunks. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.labels( @@ -100,11 +100,11 @@ def retrieve( `Retrieve one or more label definitions by external id. `_ Args: - external_id (str | SequenceNotStr[str]): External ID or list of external ids - ignore_unknown_ids (bool): If True, ignore IDs and external IDs that are not found rather than throw an exception. + external_id: External ID or list of external ids + ignore_unknown_ids: If True, ignore IDs and external IDs that are not found rather than throw an exception. Returns: - LabelDefinition | LabelDefinitionList | None: The requested label definition(s) + The requested label definition(s) Examples: @@ -134,14 +134,14 @@ def list( `List Labels `_ Args: - name (str | None): returns the label definitions matching that name - external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified - data_set_ids (int | Sequence[int] | None): return only labels in the data sets with this id / these ids. 
- data_set_external_ids (str | SequenceNotStr[str] | None): return only labels in the data sets with this external id / these external ids. - limit (int | None): Maximum number of label definitions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + name: returns the label definitions matching that name + external_id_prefix: filter label definitions with external ids starting with the prefix specified + data_set_ids: return only labels in the data sets with this id / these ids. + data_set_external_ids: return only labels in the data sets with this external id / these external ids. + limit: Maximum number of label definitions to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - LabelDefinitionList: List of requested Labels + List of requested Labels Examples: @@ -185,10 +185,10 @@ def create( `Create one or more label definitions. `_ Args: - label (LabelDefinition | LabelDefinitionWrite | Sequence[LabelDefinition | LabelDefinitionWrite]): The label definition(s) to create. + label: The label definition(s) to create. 
Returns: - LabelDefinition | LabelDefinitionList: Created label definition(s) + Created label definition(s) Raises: TypeError: Function input 'label' is of the wrong type @@ -211,7 +211,7 @@ def delete(self, external_id: str | SequenceNotStr[str] | None = None) -> None: `Delete one or more label definitions `_ Args: - external_id (str | SequenceNotStr[str] | None): One or more label external ids + external_id: One or more label external ids Examples: diff --git a/cognite/client/_sync_api/limits.py b/cognite/client/_sync_api/limits.py index 98a105d639..4ed77b40a3 100644 --- a/cognite/client/_sync_api/limits.py +++ b/cognite/client/_sync_api/limits.py @@ -1,6 +1,6 @@ """ =============================================================================== -6b64355411f69544440ffc5607c18fb1 +59c7f513c0ba8680aaf72801856e9400 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -33,13 +33,10 @@ def retrieve(self, id: str) -> Limit | None: Retrieves a limit value by its `limitId`. Args: - id (str): Limit ID to retrieve. - Limits are identified by an id containing the service name and a service-scoped limit name. - For instance `atlas.monthly_ai_tokens` is the id of the `atlas` service limit `monthly_ai_tokens`. - Service and limit names are always in `lower_snake_case`. + id: Limit ID to retrieve. Limits are identified by an id containing the service name and a service-scoped limit name. For instance `atlas.monthly_ai_tokens` is the id of the `atlas` service limit `monthly_ai_tokens`. Service and limit names are always in `lower_snake_case`. Returns: - Limit | None: The requested limit, or `None` if not found. + The requested limit, or `None` if not found. Examples: @@ -59,11 +56,11 @@ def list(self, filter: Prefix | None = None, limit: int | None = DEFAULT_LIMIT_R Retrieves all limit values for a specific project. 
Optionally filter by limit ID prefix using a `Prefix` filter. Args: - filter (Prefix | None): Optional `Prefix` filter to apply on the `limitId` property (only `Prefix` filters are supported). - limit (int | None): Maximum number of limits to return. Defaults to 25. Set to None or -1 to return all limits + filter: Optional `Prefix` filter to apply on the `limitId` property (only `Prefix` filters are supported). + limit: Maximum number of limits to return. Defaults to 25. Set to None or -1 to return all limits Returns: - LimitList: List of all limit values in the project. + List of all limit values in the project. Examples: diff --git a/cognite/client/_sync_api/org_apis/principals.py b/cognite/client/_sync_api/org_apis/principals.py index 1bafe9c391..0145372e66 100644 --- a/cognite/client/_sync_api/org_apis/principals.py +++ b/cognite/client/_sync_api/org_apis/principals.py @@ -1,6 +1,6 @@ """ =============================================================================== -d64b96268879e7c9ac231f45c7982679 +90bb7a312aaf1bad9e12486593e81ddb This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -29,8 +29,7 @@ def me(self) -> Principal: `Get the current caller's information. `_ Returns: - Principal: The principal of the user running the code, i.e. the - principal *this* AsyncCogniteClient was instantiated with. + The principal of the user running the code, i.e. the principal *this* AsyncCogniteClient was instantiated with. Examples: Get your own principal: @@ -72,12 +71,12 @@ def retrieve( `Retrieve principal by reference in the organization `_ Args: - id (str | SequenceNotStr[str] | None): The ID(s) of the principal(s) to retrieve. - external_id (str | SequenceNotStr[str] | None): The external ID(s) of the principal to retrieve. - ignore_unknown_ids (bool): This is only relevant when retrieving multiple principals. 
If set to True, the method will return the principals that were found and ignore the ones that were not found. If set to False, the method will raise a CogniteAPIError if any of the specified principals were not found. Defaults to False. + id: The ID(s) of the principal(s) to retrieve. + external_id: The external ID(s) of the principal to retrieve. + ignore_unknown_ids: This is only relevant when retrieving multiple principals. If set to True, the method will return the principals that were found and ignore the ones that were not found. If set to False, the method will raise a CogniteAPIError if any of the specified principals were not found. Defaults to False. Returns: - Principal | PrincipalList | None: The principal(s) with the specified ID(s) or external ID(s). + The principal(s) with the specified ID(s) or external ID(s). Examples: Retrieve a principal by ID: @@ -102,11 +101,11 @@ def list(self, types: str | Sequence[str] | None = None, limit: int = DEFAULT_LI `List principals in the organization `_ Args: - types (str | Sequence[str] | None): Filter by principal type(s). Defaults to None, which means no filtering. - limit (int): The maximum number of principals to return. Defaults to 25. + types: Filter by principal type(s). Defaults to None, which means no filtering. + limit: The maximum number of principals to return. Defaults to 25. Returns: - PrincipalList: The principal of the user running the code, i.e. the principal *this* CogniteClient was instantiated with. + The principal of the user running the code, i.e. the principal *this* CogniteClient was instantiated with. 
Examples: List principals in the organization: diff --git a/cognite/client/_sync_api/postgres_gateway/tables.py b/cognite/client/_sync_api/postgres_gateway/tables.py index 33ffb1de78..e2114ab9d1 100644 --- a/cognite/client/_sync_api/postgres_gateway/tables.py +++ b/cognite/client/_sync_api/postgres_gateway/tables.py @@ -1,6 +1,6 @@ """ =============================================================================== -789d58776aa258529e2d0c9453d9e029 +64e345a44d44613ff09985dcd91f6323 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -40,11 +40,11 @@ def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> I Fetches custom tables as they are iterated over, so you keep a limited number of custom tables in memory. Args: - chunk_size (int | None): Number of custom tables to return in each chunk. Defaults to yielding one custom table at a time. - limit (int | None): Maximum number of custom tables to return. Defaults to return all. + chunk_size: Number of custom tables to return in each chunk. Defaults to yielding one custom table at a time. + limit: Maximum number of custom tables to return. Defaults to return all. Yields: - pg.Table | pg.TableList: yields Table one by one if chunk_size is not specified, else TableList objects. + yields Table one by one if chunk_size is not specified, else TableList objects. """ yield from SyncIterator(self.__async_client.postgres_gateway.tables(chunk_size=chunk_size, limit=limit)) # type: ignore [misc] @@ -59,11 +59,11 @@ def create(self, username: str, items: pg.TableWrite | Sequence[pg.TableWrite]) `Create tables `_ Args: - username (str): The name of the username (a.k.a. database) to be managed from the API - items (pg.TableWrite | Sequence[pg.TableWrite]): The table(s) to create + username: The name of the username (a.k.a. 
database) to be managed from the API + items: The table(s) to create Returns: - pg.Table | pg.TableList: Created tables + Created tables Examples: @@ -99,12 +99,12 @@ def retrieve( Retrieve a list of Postgres tables for a user by their table names, optionally ignoring unknown table names Args: - username (str): The username (a.k.a. database) to be managed from the API - tablename (str | SequenceNotStr[str]): The name of the table(s) to be retrieved - ignore_unknown_ids (bool): Ignore table names not found + username: The username (a.k.a. database) to be managed from the API + tablename: The name of the table(s) to be retrieved + ignore_unknown_ids: Ignore table names not found Returns: - pg.Table | pg.TableList | None: Foreign tables + Foreign tables Examples: @@ -132,9 +132,9 @@ def delete(self, username: str, tablename: str | SequenceNotStr[str], ignore_unk `Delete postgres table(s) `_ Args: - username (str): The name of the username (a.k.a. database) to be managed from the API - tablename (str | SequenceNotStr[str]): The name of the table(s) to be deleted - ignore_unknown_ids (bool): Ignore table names that are not found + username: The name of the username (a.k.a. database) to be managed from the API + tablename: The name of the table(s) to be deleted + ignore_unknown_ids: Ignore table names that are not found Examples: @@ -163,12 +163,12 @@ def list( List all tables in a given project. Args: - username (str): The name of the username (a.k.a. database) to be managed from the API - include_built_ins (Literal['yes', 'no'] | None): Determines if API should return built-in tables or not - limit (int | None): Limits the number of results to be returned. + username: The name of the username (a.k.a. database) to be managed from the API + include_built_ins: Determines if API should return built-in tables or not + limit: Limits the number of results to be returned. 
Returns: - pg.TableList: Foreign tables + Foreign tables Examples: diff --git a/cognite/client/_sync_api/postgres_gateway/users.py b/cognite/client/_sync_api/postgres_gateway/users.py index d1dfc8d9c8..9a68813285 100644 --- a/cognite/client/_sync_api/postgres_gateway/users.py +++ b/cognite/client/_sync_api/postgres_gateway/users.py @@ -1,6 +1,6 @@ """ =============================================================================== -8e339ffece20e9f0685104f8b625f982 +6acd0116cb4337950bd3e077f7b31818 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -47,11 +47,11 @@ def __call__(self, chunk_size: int | None = None, limit: int | None = None) -> I Fetches user as they are iterated over, so you keep a limited number of users in memory. Args: - chunk_size (int | None): Number of users to return in each chunk. Defaults to yielding one user at a time. - limit (int | None): Maximum number of users to return. Defaults to return all. + chunk_size: Number of users to return in each chunk. Defaults to yielding one user at a time. + limit: Maximum number of users to return. Defaults to return all. Yields: - User | UserList: yields User one by one if chunk_size is not specified, else UserList objects. + yields User one by one if chunk_size is not specified, else UserList objects. """ # noqa: DOC404 yield from SyncIterator(self.__async_client.postgres_gateway.users(chunk_size=chunk_size, limit=limit)) # type: ignore [misc] @@ -68,10 +68,10 @@ def create(self, user: UserWrite | Sequence[UserWrite]) -> UserCreated | UserCre Create postgres users. Args: - user (UserWrite | Sequence[UserWrite]): The user(s) to create. + user: The user(s) to create. 
Returns: - UserCreated | UserCreatedList: The created user(s) + The created user(s) Examples: @@ -105,10 +105,10 @@ def update(self, items: UserUpdate | UserWrite | Sequence[UserUpdate | UserWrite Update postgres users Args: - items (UserUpdate | UserWrite | Sequence[UserUpdate | UserWrite]): The user(s) to update. + items: The user(s) to update. Returns: - User | UserList: The updated user(s) + The updated user(s) Examples: @@ -136,9 +136,8 @@ def delete(self, username: str | SequenceNotStr[str], ignore_unknown_ids: bool = Delete postgres users Args: - username (str | SequenceNotStr[str]): Usernames of the users to delete. - ignore_unknown_ids (bool): Ignore usernames that are not found - + username: Usernames of the users to delete. + ignore_unknown_ids: Ignore usernames that are not found Examples: @@ -166,11 +165,11 @@ def retrieve(self, username: str | SequenceNotStr[str], ignore_unknown_ids: bool Retrieve a list of postgres users by their usernames, optionally ignoring unknown usernames Args: - username (str | SequenceNotStr[str]): Usernames of the users to retrieve. - ignore_unknown_ids (bool): Ignore usernames that are not found + username: Usernames of the users to retrieve. + ignore_unknown_ids: Ignore usernames that are not found Returns: - User | UserList: The retrieved user(s). + The retrieved user(s). Examples: @@ -194,10 +193,10 @@ def list(self, limit: int = DEFAULT_LIMIT_READ) -> UserList: List all users in a given project. Args: - limit (int): Limits the number of results to be returned. + limit: Limits the number of results to be returned. 
Returns: - UserList: A list of users + A list of users Examples: diff --git a/cognite/client/_sync_api/raw/databases.py b/cognite/client/_sync_api/raw/databases.py index 7c422cb3e2..7fc43283f5 100644 --- a/cognite/client/_sync_api/raw/databases.py +++ b/cognite/client/_sync_api/raw/databases.py @@ -1,6 +1,6 @@ """ =============================================================================== -85ec28c30c20632fe6f6730bab7663fb +9c32bc5498a4bfa575cd60f771cf81e9 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -39,11 +39,11 @@ def __call__( Fetches dbs as they are iterated over, so you keep a limited number of dbs in memory. Args: - chunk_size (int | None): Number of dbs to return in each chunk. Defaults to yielding one db a time. - limit (int | None): Maximum number of dbs to return. Defaults to return all items. + chunk_size: Number of dbs to return in each chunk. Defaults to yielding one db a time. + limit: Maximum number of dbs to return. Defaults to return all items. Yields: - Database | DatabaseList: No description. + No description. """ # noqa: DOC404 yield from SyncIterator(self.__async_client.raw.databases(chunk_size=chunk_size, limit=limit)) # type: ignore [misc] @@ -58,10 +58,10 @@ def create(self, name: str | list[str]) -> Database | DatabaseList: `Create one or more databases. `_ Args: - name (str | list[str]): A db name or list of db names to create. + name: A db name or list of db names to create. Returns: - Database | DatabaseList: Database or list of databases that has been created. + Database or list of databases that has been created. Examples: @@ -79,8 +79,8 @@ def delete(self, name: str | SequenceNotStr[str], recursive: bool = False) -> No `Delete one or more databases. `_ Args: - name (str | SequenceNotStr[str]): A db name or list of db names to delete. - recursive (bool): Recursively delete all tables in the database(s). 
+ name: A db name or list of db names to delete. + recursive: Recursively delete all tables in the database(s). Examples: @@ -98,10 +98,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> DatabaseList: `List databases `_ Args: - limit (int | None): Maximum number of databases to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of databases to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - DatabaseList: List of requested databases. + List of requested databases. Examples: diff --git a/cognite/client/_sync_api/raw/rows.py b/cognite/client/_sync_api/raw/rows.py index ece8b6be43..fdad71b77a 100644 --- a/cognite/client/_sync_api/raw/rows.py +++ b/cognite/client/_sync_api/raw/rows.py @@ -1,6 +1,6 @@ """ =============================================================================== -65855d21c810dfa250de3af741093053 +a2cb436e54959d0d64f7b634c8888eb3 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -100,20 +100,17 @@ def __call__( by halting retrieval speed when the callers code can't keep up. Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - chunk_size (int | None): Number of rows to return in each chunk (may be lower). Defaults to yielding one row at a time. - Note: When used together with 'partitions' the default is 10000 (matching the API limit) and there's an implicit minimum of 1000 rows. - partitions (int | None): Retrieve rows in parallel using this number of workers. Defaults to not use concurrency. - The setting is capped at ``global_config.concurrency_settings.raw.read`` and _can_ be used with a finite limit. To prevent unexpected problems - and maximize read throughput, check out `concurrency limits in the API documentation. `_ - limit (int | None): Maximum number of rows to return. 
Can be used with partitions. Defaults to returning all items. - min_last_updated_time (int | None): Rows must have been last updated after this time (exclusive). Milliseconds since epoch. - max_last_updated_time (int | None): Rows must have been last updated before this time (inclusive). Milliseconds since epoch. - columns (list[str] | None): List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys. + db_name: Name of the database. + table_name: Name of the table. + chunk_size: Number of rows to return in each chunk (may be lower). Defaults to yielding one row at a time. Note: When used together with 'partitions' the default is 10000 (matching the API limit) and there's an implicit minimum of 1000 rows. + partitions: Retrieve rows in parallel using this number of workers. Defaults to not use concurrency. The setting is capped at ``global_config.concurrency_settings.raw.read`` and _can_ be used with a finite limit. To prevent unexpected problems and maximize read throughput, check out `concurrency limits in the API documentation. `_ + limit: Maximum number of rows to return. Can be used with partitions. Defaults to returning all items. + min_last_updated_time: Rows must have been last updated after this time (exclusive). Milliseconds since epoch. + max_last_updated_time: Rows must have been last updated before this time (inclusive). Milliseconds since epoch. + columns: List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys. Yields: - Row | RowList: An iterator yielding the requested row or rows. + An iterator yielding the requested row or rows. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.raw.rows( @@ -139,10 +136,10 @@ def insert( `Insert one or more rows into a table. `_ Args: - db_name (str): Name of the database. - table_name (str): Name of the table. 
- row (Sequence[Row] | Sequence[RowWrite] | Row | RowWrite | dict): The row(s) to insert - ensure_parent (bool): Create database/table if they don't already exist. + db_name: Name of the database. + table_name: Name of the table. + row: The row(s) to insert + ensure_parent: Create database/table if they don't already exist. Examples: @@ -179,11 +176,11 @@ def insert_dataframe( Uses index for row keys. Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - dataframe (pd.DataFrame): The dataframe to insert. Index will be used as row keys. - ensure_parent (bool): Create database/table if they don't already exist. - dropna (bool): Remove NaNs (but keep None's in dtype=object columns) before inserting. Done individually per column. Default: True + db_name: Name of the database. + table_name: Name of the table. + dataframe: The dataframe to insert. Index will be used as row keys. + ensure_parent: Create database/table if they don't already exist. + dropna: Remove NaNs (but keep None's in dtype=object columns) before inserting. Done individually per column. Default: True Examples: @@ -211,9 +208,9 @@ def delete(self, db_name: str, table_name: str, key: str | SequenceNotStr[str]) `Delete rows from a table. `_ Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - key (str | SequenceNotStr[str]): The key(s) of the row(s) to delete. + db_name: Name of the database. + table_name: Name of the table. + key: The key(s) of the row(s) to delete. Examples: @@ -232,12 +229,12 @@ def retrieve(self, db_name: str, table_name: str, key: str) -> Row | None: `Retrieve a single row by key. `_ Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - key (str): The key of the row to retrieve. + db_name: Name of the database. + table_name: Name of the table. + key: The key of the row to retrieve. Returns: - Row | None: The requested row. + The requested row. 
Examples: @@ -273,21 +270,18 @@ def retrieve_dataframe( Rowkeys are used as the index. Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - min_last_updated_time (int | None): Rows must have been last updated after this time. Milliseconds since epoch. - max_last_updated_time (int | None): Rows must have been last updated before this time. Milliseconds since epoch. - columns (list[str] | None): List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys. - limit (int | None): The number of rows to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. - When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read`` - for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out - `concurrency limits in the API documentation. `_ - last_updated_time_in_index (bool): Use a MultiIndex with row keys and last_updated_time as index. - infer_dtypes (bool): If True, pandas will try to infer dtypes of the columns. Defaults to True. + db_name: Name of the database. + table_name: Name of the table. + min_last_updated_time: Rows must have been last updated after this time. Milliseconds since epoch. + max_last_updated_time: Rows must have been last updated before this time. Milliseconds since epoch. + columns: List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys. + limit: The number of rows to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. When partitions is not passed, it defaults to 1, i.e. 
no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read`` for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out `concurrency limits in the API documentation. `_ + last_updated_time_in_index: Use a MultiIndex with row keys and last_updated_time as index. + infer_dtypes: If True, pandas will try to infer dtypes of the columns. Defaults to True. Returns: - pd.DataFrame: The requested rows in a pandas dataframe. + The requested rows in a pandas dataframe. Examples: @@ -326,19 +320,16 @@ def list( `List rows in a table. `_ Args: - db_name (str): Name of the database. - table_name (str): Name of the table. - min_last_updated_time (int | None): Rows must have been last updated after this time (exclusive). Milliseconds since epoch. - max_last_updated_time (int | None): Rows must have been last updated before this time (inclusive). Milliseconds since epoch. - columns (list[str] | None): List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys. - limit (int | None): The number of rows to retrieve. Can be used with partitions. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. - When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read`` - for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out - `concurrency limits in the API documentation. `_ + db_name: Name of the database. + table_name: Name of the table. + min_last_updated_time: Rows must have been last updated after this time (exclusive). Milliseconds since epoch. + max_last_updated_time: Rows must have been last updated before this time (inclusive). Milliseconds since epoch. 
+ columns: List of column keys. Set to `None` to retrieving all, use empty list, [], to retrieve only row keys. + limit: The number of rows to retrieve. Can be used with partitions. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: Retrieve rows in parallel using this number of workers. Can be used together with a (large) finite limit. When partitions is not passed, it defaults to 1, i.e. no concurrency for a finite limit and ``global_config.concurrency_settings.raw.read`` for an unlimited query (will be capped at this value). To prevent unexpected problems and maximize read throughput, check out `concurrency limits in the API documentation. `_ Returns: - RowList: The requested rows. + The requested rows. Examples: diff --git a/cognite/client/_sync_api/raw/tables.py b/cognite/client/_sync_api/raw/tables.py index b309ef8063..123ac73955 100644 --- a/cognite/client/_sync_api/raw/tables.py +++ b/cognite/client/_sync_api/raw/tables.py @@ -1,6 +1,6 @@ """ =============================================================================== -48028ade158f6c6a12a7ff231fe10c87 +92fe9d80e648744edfc9807377889346 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -39,12 +39,12 @@ def __call__( Fetches tables as they are iterated over, so you keep a limited number of tables in memory. Args: - db_name (str): Name of the database to iterate over tables for - chunk_size (int | None): Number of tables to return in each chunk. Defaults to yielding one table a time. - limit (int | None): Maximum number of tables to return. Defaults to return all items. + db_name: Name of the database to iterate over tables for + chunk_size: Number of tables to return in each chunk. Defaults to yielding one table a time. + limit: Maximum number of tables to return. Defaults to return all items. Yields: - raw.Table | raw.TableList: The tables in the database. 
+ The tables in the database. """ yield from SyncIterator(self.__async_client.raw.tables(db_name=db_name, chunk_size=chunk_size, limit=limit)) # type: ignore [call-overload] @@ -59,11 +59,11 @@ def create(self, db_name: str, name: str | list[str]) -> raw.Table | raw.TableLi `Create one or more tables. `_ Args: - db_name (str): Database to create the tables in. - name (str | list[str]): A table name or list of table names to create. + db_name: Database to create the tables in. + name: A table name or list of table names to create. Returns: - raw.Table | raw.TableList: raw.Table or list of tables that has been created. + raw.Table or list of tables that has been created. Examples: @@ -81,8 +81,8 @@ def delete(self, db_name: str, name: str | SequenceNotStr[str]) -> None: `Delete one or more tables. `_ Args: - db_name (str): Database to delete tables from. - name (str | SequenceNotStr[str]): A table name or list of table names to delete. + db_name: Database to delete tables from. + name: A table name or list of table names to delete. Examples: @@ -100,11 +100,11 @@ def list(self, db_name: str, limit: int | None = DEFAULT_LIMIT_READ) -> raw.Tabl `List tables `_ Args: - db_name (str): The database to list tables from. - limit (int | None): Maximum number of tables to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + db_name: The database to list tables from. + limit: Maximum number of tables to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - raw.TableList: List of requested tables. + List of requested tables. 
Examples: diff --git a/cognite/client/_sync_api/relationships.py b/cognite/client/_sync_api/relationships.py index 616eac5f9a..7b7f58f6d9 100644 --- a/cognite/client/_sync_api/relationships.py +++ b/cognite/client/_sync_api/relationships.py @@ -1,6 +1,6 @@ """ =============================================================================== -fe09ebcbcb830ad53c6070c9d3d75614 +d65ed29d7fa57070fe41d79eac5099c7 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -100,25 +100,25 @@ def __call__( Fetches relationships as they are iterated over, so you keep a limited number of relationships in memory. Args: - chunk_size (int | None): Number of Relationships to return in each chunk. Defaults to yielding one relationship at a time. - source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their source External Id field - source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their source Type field - target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their target External Id field - target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their target Type field - data_set_ids (int | Sequence[int] | None): Return only relationships in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only relationships in the specified data set(s) with this external id / these external ids. - start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) - end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) - confidence (dict[str, int] | None): Range to filter the field for (inclusive). 
- last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive). - created_time (dict[str, int] | None): Range to filter the field for (inclusive). - active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. - labels (LabelFilter | None): Return only the resource matching the specified label constraints. - limit (int | None): No description. - fetch_resources (bool): No description. + chunk_size: Number of Relationships to return in each chunk. Defaults to yielding one relationship at a time. + source_external_ids: Include relationships that have any of these values in their source External Id field + source_types: Include relationships that have any of these values in their source Type field + target_external_ids: Include relationships that have any of these values in their target External Id field + target_types: Include relationships that have any of these values in their target Type field + data_set_ids: Return only relationships in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only relationships in the specified data set(s) with this external id / these external ids. + start_time: Range between two timestamps, minimum and maximum milliseconds (inclusive) + end_time: Range between two timestamps, minimum and maximum milliseconds (inclusive) + confidence: Range to filter the field for (inclusive). 
+ last_updated_time: Range to filter the field for (inclusive). + created_time: Range to filter the field for (inclusive). + active_at_time: Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. + labels: Return only the resource matching the specified label constraints. + limit: No description. + fetch_resources: No description. Yields: - Relationship | RelationshipList: yields Relationship one by one if chunk_size is not specified, else RelationshipList objects. + yields Relationship one by one if chunk_size is not specified, else RelationshipList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.relationships( @@ -146,11 +146,11 @@ def retrieve(self, external_id: str, fetch_resources: bool = False) -> Relations Retrieve a single relationship by external id. Args: - external_id (str): External ID - fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the source and target fields. + external_id: External ID + fetch_resources: If true, will try to return the full resources referenced by the relationship in the source and target fields. Returns: - Relationship | None: Requested relationship or None if it does not exist. + Requested relationship or None if it does not exist. Examples: @@ -172,13 +172,12 @@ def retrieve_multiple( `Retrieve multiple relationships by external id. 
`_ Args: - external_ids (SequenceNotStr[str]): External IDs - fetch_resources (bool): If true, will try to return the full resources referenced by the relationship in the - source and target fields. - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + external_ids: External IDs + fetch_resources: If true, will try to return the full resources referenced by the relationship in the source and target fields. + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - RelationshipList: The requested relationships. + The requested relationships. Examples: @@ -218,25 +217,25 @@ def list( `Lists relationships stored in the project based on a query filter given in the payload of this request. Up to 1000 relationships can be retrieved in one operation. `_ Args: - source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their source External Id field - source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their source Type field - target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their target External Id field - target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their target Type field - data_set_ids (int | Sequence[int] | None): Return only relationships in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only relationships in the specified data set(s) with this external id / these external ids. - start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) - end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) - confidence (dict[str, int] | None): Range to filter the field for (inclusive). 
- last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive). - created_time (dict[str, int] | None): Range to filter the field for (inclusive). - active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. - labels (LabelFilter | None): Return only the resource matching the specified label constraints. - limit (int | None): Maximum number of relationships to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): Retrieve relationships in parallel using this number of workers. Also requires `limit=None` to be passed. - fetch_resources (bool): if true, will try to return the full resources referenced by the relationship in the source and target fields. + source_external_ids: Include relationships that have any of these values in their source External Id field + source_types: Include relationships that have any of these values in their source Type field + target_external_ids: Include relationships that have any of these values in their target External Id field + target_types: Include relationships that have any of these values in their target Type field + data_set_ids: Return only relationships in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only relationships in the specified data set(s) with this external id / these external ids. 
+ start_time: Range between two timestamps, minimum and maximum milliseconds (inclusive) + end_time: Range between two timestamps, minimum and maximum milliseconds (inclusive) + confidence: Range to filter the field for (inclusive). + last_updated_time: Range to filter the field for (inclusive). + created_time: Range to filter the field for (inclusive). + active_at_time: Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. + labels: Return only the resource matching the specified label constraints. + limit: Maximum number of relationships to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: Retrieve relationships in parallel using this number of workers. Also requires `limit=None` to be passed. + fetch_resources: if true, will try to return the full resources referenced by the relationship in the source and target fields. Returns: - RelationshipList: List of requested relationships + List of requested relationships Examples: @@ -286,10 +285,10 @@ def create( `Create one or more relationships. `_ Args: - relationship (Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]): Relationship or list of relationships to create. + relationship: Relationship or list of relationships to create. 
Returns: - Relationship | RelationshipList: Created relationship(s) + Created relationship(s) Note: - The source_type and target_type field in the Relationship(s) can be any string among "Asset", "TimeSeries", "File", "Event", "Sequence". @@ -344,11 +343,11 @@ def update( Currently, a full replacement of labels on a relationship is not supported (only partial add/remove updates). See the example below on how to perform partial labels update. Args: - item (Relationship | RelationshipWrite | RelationshipUpdate | Sequence[Relationship | RelationshipWrite | RelationshipUpdate]): Relationship(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Relationship or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Relationship(s) to update + mode: How to update data when a non-update object is given (Relationship or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Relationship | RelationshipList: Updated relationship(s) + Updated relationship(s) Examples: Update a data set that you have fetched. This will perform a full update of the data set: @@ -406,11 +405,11 @@ def upsert( For more details, see :ref:`appendix-upsert`. 
Args: - item (Relationship | RelationshipWrite | Sequence[Relationship | RelationshipWrite]): Relationship or list of relationships to upsert. - mode (Literal['patch', 'replace']): Whether to patch or replace in the case the relationships are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + item: Relationship or list of relationships to upsert. + mode: Whether to patch or replace in the case the relationships are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. Returns: - Relationship | RelationshipList: The upserted relationship(s). + The upserted relationship(s). Examples: @@ -438,8 +437,8 @@ def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: boo `Delete one or more relationships. `_ Args: - external_id (str | SequenceNotStr[str]): External ID or list of external ids - ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. + external_id: External ID or list of external ids + ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception. Examples: Delete relationships by external id: diff --git a/cognite/client/_sync_api/sequence_data.py b/cognite/client/_sync_api/sequence_data.py index c67e7189e5..abbd2e4c4c 100644 --- a/cognite/client/_sync_api/sequence_data.py +++ b/cognite/client/_sync_api/sequence_data.py @@ -1,6 +1,6 @@ """ =============================================================================== -ac7597ce6ff7edc9d99e5f720b375c96 +324a45aa6edbe9caf933c545235285f7 This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -40,10 +40,10 @@ def insert( `Insert rows into a sequence `_ Args: - rows (SequenceRows | dict[int, typing.Sequence[int | float | str]] | typing.Sequence[tuple[int, typing.Sequence[int | float | str]]] | typing.Sequence[dict[str, Any]]): The rows you wish to insert. Can either be a list of tuples, a list of {"rowNumber":... ,"values": ...} objects, a dictionary of rowNumber: data, or a SequenceData object. See examples below. - columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. - id (int | None): Id of sequence to insert rows into. - external_id (str | None): External id of sequence to insert rows into. + rows: The rows you wish to insert. Can either be a list of tuples, a list of {"rowNumber":... ,"values": ...} objects, a dictionary of rowNumber: data, or a SequenceData object. See examples below. + columns: List of external id for the columns of the sequence. + id: Id of sequence to insert rows into. + external_id: External id of sequence to insert rows into. Examples: Your rows of data can be a list of tuples where the first element is the rownumber and the second element is the data to be inserted: @@ -92,10 +92,10 @@ def insert_dataframe( The sequence and columns must already exist. Args: - dataframe (pd.DataFrame): Pandas DataFrame object containing the sequence data. - id (int | None): Id of sequence to insert rows into. - external_id (str | None): External id of sequence to insert rows into. - dropna (bool): Whether to drop rows where all values are missing. Default: True. + dataframe: Pandas DataFrame object containing the sequence data. + id: Id of sequence to insert rows into. + external_id: External id of sequence to insert rows into. + dropna: Whether to drop rows where all values are missing. Default: True. 
Examples: Insert three rows into columns 'col_a' and 'col_b' of the sequence with id=123: @@ -118,9 +118,9 @@ def delete(self, rows: typing.Sequence[int], id: int | None = None, external_id: `Delete rows from a sequence `_ Args: - rows (typing.Sequence[int]): List of row numbers. - id (int | None): Id of sequence to delete rows from. - external_id (str | None): External id of sequence to delete rows from. + rows: List of row numbers. + id: Id of sequence to delete rows from. + external_id: External id of sequence to delete rows from. Examples: @@ -138,10 +138,10 @@ def delete_range(self, start: int, end: int | None, id: int | None = None, exter `Delete a range of rows from a sequence. Note this operation is potentially slow, as retrieves each row before deleting. `_ Args: - start (int): Row number to start from (inclusive). - end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to delete all rows until end of sequence. - id (int | None): Id of sequence to delete rows from. - external_id (str | None): External id of sequence to delete rows from. + start: Row number to start from (inclusive). + end: Upper limit on the row number (exclusive). Set to None or -1 to delete all rows until end of sequence. + id: Id of sequence to delete rows from. + external_id: External id of sequence to delete rows from. Examples: @@ -235,15 +235,15 @@ def retrieve( `Retrieve data from a sequence `_ Args: - external_id (str | SequenceNotStr[str] | None): The external id of the sequence to retrieve from. - id (int | typing.Sequence[int] | None): The internal if the sequence to retrieve from. - start (int): Row number to start from (inclusive). - end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to get all rows until end of sequence. - columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. 
- limit (int | None): Maximum number of rows to return per sequence. Pass None to fetch all (possibly limited by 'end'). + external_id: The external id of the sequence to retrieve from. + id: The internal id of the sequence to retrieve from. + start: Row number to start from (inclusive). + end: Upper limit on the row number (exclusive). Set to None or -1 to get all rows until end of sequence. + columns: List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. + limit: Maximum number of rows to return per sequence. Pass None to fetch all (possibly limited by 'end'). Returns: - SequenceRows | SequenceRowsList: SequenceRows if a single identifier was given, else SequenceRowsList + SequenceRows if a single identifier was given, else SequenceRowsList Examples: @@ -273,13 +273,13 @@ def retrieve_last_row( `Retrieves the last row (i.e the row with the highest row number) in a sequence. `_ Args: - id (int | None): Id or list of ids. - external_id (str | None): External id or list of external ids. - columns (SequenceNotStr[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. - before (int | None): (optional, int): Get latest datapoint before this row number. + id: Id of the sequence. + external_id: External id of the sequence. + columns: List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. + before: Get latest datapoint before this row number. Returns: - SequenceRows: A Datapoints object containing the requested data, or a list of such objects. + A SequenceRows object containing the requested data. Examples: @@ -310,16 +310,16 @@ def retrieve_dataframe( `Retrieve data from a sequence as a pandas dataframe `_ Args: - start (int): (inclusive) row number to start from. - end (int | None): (exclusive) upper limit on the row number. 
Set to None or -1 to get all rows until end of sequence. - columns (list[str] | None): List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. - external_id (str | None): External id of sequence. - column_names (str | None): Which field(s) to use as column header. Can use "externalId", "id", "columnExternalId", "id|columnExternalId" or "externalId|columnExternalId". Default is "externalId|columnExternalId" for queries on more than one sequence, and "columnExternalId" for queries on a single sequence. - id (int | None): Id of sequence - limit (int | None): Maximum number of rows to return per sequence. + start: (inclusive) row number to start from. + end: (exclusive) upper limit on the row number. Set to None or -1 to get all rows until end of sequence. + columns: List of external id for the columns of the sequence. If 'None' is passed, all columns will be retrieved. + external_id: External id of sequence. + column_names: Which field(s) to use as column header. Can use "externalId", "id", "columnExternalId", "id|columnExternalId" or "externalId|columnExternalId". Default is "externalId|columnExternalId" for queries on more than one sequence, and "columnExternalId" for queries on a single sequence. + id: Id of sequence + limit: Maximum number of rows to return per sequence. 
Returns: - pd.DataFrame: The requested sequence data in a pandas DataFrame + The requested sequence data in a pandas DataFrame Examples: >>> from cognite.client import CogniteClient, AsyncCogniteClient diff --git a/cognite/client/_sync_api/sequences.py b/cognite/client/_sync_api/sequences.py index dea3afb8b0..aadcf63b1d 100644 --- a/cognite/client/_sync_api/sequences.py +++ b/cognite/client/_sync_api/sequences.py @@ -1,6 +1,6 @@ """ =============================================================================== -ba1dc9330d8160a0dd16e42e281ac200 +b388f88a590ae6b0a208fb19f1abf3d6 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -99,23 +99,23 @@ def __call__( Fetches sequences as they are iterated over, so you keep a limited number of objects in memory. Args: - chunk_size (int | None): Number of sequences to return in each chunk. Defaults to yielding one event a time. - name (str | None): Filter out sequences that do not have this *exact* name. - external_id_prefix (str | None): Filter out sequences that do not have this string as the start of the externalId - metadata (dict[str, str] | None): Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. - asset_ids (typing.Sequence[int] | None): Filter out sequences that are not linked to any of these assets. - asset_subtree_ids (int | typing.Sequence[int] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. 
- data_set_ids (int | typing.Sequence[int] | None): Return only sequences in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only sequences in the specified data set(s) with this external id / these external ids. - created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - limit (int | None): Max number of sequences to return. Defaults to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + chunk_size: Number of sequences to return in each chunk. Defaults to yielding one sequence at a time. + name: Filter out sequences that do not have this *exact* name. + external_id_prefix: Filter out sequences that do not have this string as the start of the externalId + metadata: Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. + asset_ids: Filter out sequences that are not linked to any of these assets. + asset_subtree_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. 
If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only sequences in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only sequences in the specified data set(s) with this external id / these external ids. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit: Max number of sequences to return. Defaults to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. Yields: - Sequence | SequenceList: yields Sequence one by one if chunk_size is not specified, else SequenceList objects. + yields Sequence one by one if chunk_size is not specified, else SequenceList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.sequences( @@ -141,11 +141,11 @@ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Seq `Retrieve a single sequence by id. `_ Args: - id (int | None): ID - external_id (str | None): External ID + id: ID + external_id: External ID Returns: - Sequence | None: Requested sequence or None if it does not exist. + Requested sequence or None if it does not exist. Examples: @@ -172,12 +172,12 @@ def retrieve_multiple( `Retrieve multiple sequences by id. `_ Args: - ids (typing.Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. 
+ ids: IDs + external_ids: External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - SequenceList: The requested sequences. + The requested sequences. Examples: @@ -207,11 +207,11 @@ def aggregate_count( `Count of sequences matching the specified filters and search. `_ Args: - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count. - filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down sequences to count requiring exact match. + advanced_filter: The filter to narrow down the sequences to count. + filter: The filter to narrow down sequences to count requiring exact match. Returns: - int: The number of sequences matching the specified filters and search. + The number of sequences matching the specified filters and search. Examples: @@ -242,13 +242,13 @@ def aggregate_cardinality_values( `Find approximate property count for sequences. `_ Args: - property (SequenceProperty | str | list[str]): The property to count the cardinality of. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match. + property: The property to count the cardinality of. + advanced_filter: The filter to narrow down the sequences to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the sequences to count requiring exact match. Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -289,13 +289,13 @@ def aggregate_cardinality_properties( `Find approximate paths count for sequences. 
`_ Args: - path (SequenceProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the sequences to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the sequences to count requiring exact match. Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -324,13 +324,13 @@ def aggregate_unique_values( `Get unique paths with counts for sequences. `_ Args: - property (SequenceProperty | str | list[str]): The property to group by. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match. + property: The property to group by. + advanced_filter: The filter to narrow down the sequences to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the sequences to count requiring exact match. 
Returns: - UniqueResultList: List of unique values of sequences matching the specified filters and search. + List of unique values of sequences matching the specified filters and search. Examples: @@ -380,13 +380,13 @@ def aggregate_unique_properties( `Find approximate unique sequence properties. `_ Args: - path (SequenceProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the sequences to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (SequenceFilter | dict[str, Any] | None): The filter to narrow down the sequences to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the sequences to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the sequences to count requiring exact match. Returns: - UniqueResultList: List of unique values of sequences matching the specified filters and search. + List of unique values of sequences matching the specified filters and search. Examples: @@ -417,10 +417,10 @@ def create( `Create one or more sequences. `_ Args: - sequence (Sequence | SequenceWrite | typing.Sequence[Sequence] | typing.Sequence[SequenceWrite]): Sequence or list of Sequence to create. 
The Sequence columns parameter is a list of objects with fields `externalId` (external id of the column, when omitted, they will be given ids of 'column0, column1, ...'), `valueType` (data type of the column, either STRING, LONG, or DOUBLE, with default DOUBLE), `name`, `description`, `metadata` (optional fields to describe and store information about the data in the column). Other fields will be removed automatically, so a columns definition from a different sequence object can be passed here. + sequence: Sequence or list of Sequence to create. The Sequence columns parameter is a list of objects with fields `externalId` (external id of the column, when omitted, they will be given ids of 'column0, column1, ...'), `valueType` (data type of the column, either STRING, LONG, or DOUBLE, with default DOUBLE), `name`, `description`, `metadata` (optional fields to describe and store information about the data in the column). Other fields will be removed automatically, so a columns definition from a different sequence object can be passed here. Returns: - Sequence | SequenceList: The created sequence(s). + The created sequence(s). Examples: @@ -452,9 +452,9 @@ def delete( `Delete one or more sequences. `_ Args: - id (int | typing.Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: External ID or list of external ids + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -492,11 +492,11 @@ def update( `Update one or more sequences. 
`_ Args: - item (Sequence | SequenceWrite | SequenceUpdate | typing.Sequence[Sequence | SequenceWrite | SequenceUpdate]): Sequences to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Sequence or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Sequences to update + mode: How to update data when a non-update object is given (Sequence or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - Sequence | SequenceList: Updated sequences. + Updated sequences. Examples: @@ -587,11 +587,11 @@ def upsert( For more details, see :ref:`appendix-upsert`. Args: - item (Sequence | SequenceWrite | typing.Sequence[Sequence | SequenceWrite]): Sequence or list of sequences to upsert. - mode (Literal['patch', 'replace']): Whether to patch or replace in the case the sequences are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + item: Sequence or list of sequences to upsert. + mode: Whether to patch or replace in the case the sequences are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. 
Returns: - Sequence | SequenceList: The upserted sequence(s). + The upserted sequence(s). Examples: @@ -625,14 +625,14 @@ def search( Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. Args: - name (str | None): Prefix and fuzzy search on name. - description (str | None): Prefix and fuzzy search on description. - query (str | None): Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other' - filter (SequenceFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. - limit (int): Max number of results to return. + name: Prefix and fuzzy search on name. + description: Prefix and fuzzy search on description. + query: Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other' + filter: Filter to apply. Performs exact match on these fields. + limit: Max number of results to return. Returns: - SequenceList: The search result as a SequenceList + The search result as a SequenceList Examples: @@ -670,23 +670,23 @@ def list( `List sequences `_ Args: - name (str | None): Filter out sequences that do not have this *exact* name. - external_id_prefix (str | None): Filter out sequences that do not have this string as the start of the externalId - metadata (dict[str, str] | None): Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. - asset_ids (typing.Sequence[int] | None): Filter out sequences that are not linked to any of these assets. - asset_subtree_ids (int | typing.Sequence[int] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetIds. 
If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | typing.Sequence[int] | None): Return only sequences in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only sequences in the specified data set(s) with this external id / these external ids. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - limit (int | None): Max number of sequences to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + name: Filter out sequences that do not have this *exact* name. 
+ external_id_prefix: Filter out sequences that do not have this string as the start of the externalId + metadata: Filter out sequences that do not match these metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. + asset_ids: Filter out sequences that are not linked to any of these assets. + asset_subtree_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only sequences in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only sequences in the specified data set(s) with this external id / these external ids. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit: Max number of sequences to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. Returns: - SequenceList: The requested sequences. + The requested sequences. .. 
note:: When using `partitions`, there are few considerations to keep in mind: diff --git a/cognite/client/_sync_api/simulators/__init__.py b/cognite/client/_sync_api/simulators/__init__.py index 197b3b5ab3..9dc63a5285 100644 --- a/cognite/client/_sync_api/simulators/__init__.py +++ b/cognite/client/_sync_api/simulators/__init__.py @@ -1,6 +1,6 @@ """ =============================================================================== -3956adaf54aea2b5291a5c6ceea13e2d +7e3f167ec3753bdfa8e29705167dfffc This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -51,11 +51,11 @@ def __call__( Fetches simulators as they are iterated over, so you keep a limited number of simulators in memory. Args: - chunk_size (int | None): Number of simulators to return in each chunk. Defaults to yielding one simulator a time. - limit (int | None): Maximum number of simulators to return. Defaults to return all items. + chunk_size: Number of simulators to return in each chunk. Defaults to yielding one simulator at a time. + limit: Maximum number of simulators to return. Defaults to return all items. Yields: - Simulator | SimulatorList: yields Simulator one by one if chunk is not specified, else SimulatorList objects. + yields Simulator one by one if chunk is not specified, else SimulatorList objects. """ # noqa: DOC404 yield from SyncIterator(self.__async_client.simulators(chunk_size=chunk_size, limit=limit)) # type: ignore [misc] @@ -64,10 +64,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> SimulatorList: `List all simulators `_ Args: - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. 
Returns: - SimulatorList: List of simulators + List of simulators Examples: List simulators: diff --git a/cognite/client/_sync_api/simulators/integrations.py b/cognite/client/_sync_api/simulators/integrations.py index f39844a012..bb333d077d 100644 --- a/cognite/client/_sync_api/simulators/integrations.py +++ b/cognite/client/_sync_api/simulators/integrations.py @@ -1,6 +1,6 @@ """ =============================================================================== -215983ac4df951f01a198f0eca4a529a +ef827367aadc9cbf18d34667c0e76b53 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -58,13 +58,13 @@ def __call__( Fetches simulator integrations as they are iterated over, so you keep a limited number of simulator integrations in memory. Args: - chunk_size (int | None): Number of simulator integrations to return in each chunk. Defaults to yielding one simulator integration a time. - simulator_external_ids (str | SequenceNotStr[str] | None): Filter on simulator external ids. - active (bool | None): Filter on active status of the simulator integration. - limit (int | None): The maximum number of simulator integrations to return, pass None to return all. + chunk_size: Number of simulator integrations to return in each chunk. Defaults to yielding one simulator integration at a time. + simulator_external_ids: Filter on simulator external ids. + active: Filter on active status of the simulator integration. + limit: The maximum number of simulator integrations to return, pass None to return all. Yields: - SimulatorIntegration | SimulatorIntegrationList: yields SimulatorIntegration one by one if chunk_size is not specified, else SimulatorIntegrationList objects. + yields SimulatorIntegration one by one if chunk_size is not specified, else SimulatorIntegrationList objects. 
""" # noqa: DOC404 yield from SyncIterator( self.__async_client.simulators.integrations( @@ -84,12 +84,12 @@ def list( Retrieves a list of simulator integrations that match the given criteria. Args: - limit (int | None): The maximum number of simulator integrations to return, pass None to return all. - simulator_external_ids (str | SequenceNotStr[str] | None): Filter on simulator external ids. - active (bool | None): Filter on active status of the simulator integration. + limit: The maximum number of simulator integrations to return, pass None to return all. + simulator_external_ids: Filter on simulator external ids. + active: Filter on active status of the simulator integration. Returns: - SimulatorIntegrationList: List of simulator integrations + List of simulator integrations Examples: List a few simulator integrations: @@ -121,8 +121,8 @@ def delete( `Delete simulator integrations `_ Args: - ids (int | Sequence[int] | None): Id(s) of simulator integrations to delete - external_ids (str | SequenceNotStr[str] | None): External_id(s) of simulator integrations to delete + ids: Id(s) of simulator integrations to delete + external_ids: External_id(s) of simulator integrations to delete Examples: Delete simulator integrations by id or external id: diff --git a/cognite/client/_sync_api/simulators/logs.py b/cognite/client/_sync_api/simulators/logs.py index 9b16a42f5c..d10f611213 100644 --- a/cognite/client/_sync_api/simulators/logs.py +++ b/cognite/client/_sync_api/simulators/logs.py @@ -1,6 +1,6 @@ """ =============================================================================== -a0924757f3aa2b1e9014f5bc1247ee5f +b0e214a2da2884ea52bf3bdb08931335 This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -42,10 +42,10 @@ def retrieve(self, ids: int | Sequence[int]) -> SimulatorLogList | SimulatorLog They help users identify issues, diagnose problems, and gain insights into the behavior of the simulator integrations. Args: - ids (int | Sequence[int]): The ids of the simulator log. + ids: The ids of the simulator log. Returns: - SimulatorLogList | SimulatorLog | None: Requested simulator log(s) + Requested simulator log(s) Examples: Get simulator logs by simulator model id: diff --git a/cognite/client/_sync_api/simulators/models.py b/cognite/client/_sync_api/simulators/models.py index a131d4a36d..c86ee3da23 100644 --- a/cognite/client/_sync_api/simulators/models.py +++ b/cognite/client/_sync_api/simulators/models.py @@ -1,6 +1,6 @@ """ =============================================================================== -86ee1f4bac46e6594633a1ea5591447e +1065966c29f66e91017a12cbfb4025d4 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -47,12 +47,12 @@ def list( Retrieves a list of simulator models that match the given criteria. Args: - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. - simulator_external_ids (str | SequenceNotStr[str] | None): Filter by simulator external id(s). - sort (PropertySort | None): The criteria to sort by. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + simulator_external_ids: Filter by simulator external id(s). + sort: The criteria to sort by. Returns: - SimulatorModelList: List of simulator models + List of simulator models Examples: List simulator models: @@ -102,11 +102,11 @@ def retrieve( Retrieve one or more simulator models by ID(s) or external ID(s). 
Args: - ids (int | Sequence[int] | None): The id of the simulator model(s). - external_ids (str | SequenceNotStr[str] | None): The external id of the simulator model(s). + ids: The id of the simulator model(s). + external_ids: The external id of the simulator model(s). Returns: - SimulatorModel | SimulatorModelList | None: Requested simulator model(s) + Requested simulator model(s) Examples: Get simulator model by id: @@ -163,13 +163,13 @@ def __call__( Fetches simulator models as they are iterated over, so you keep a limited number of simulator models in memory. Args: - chunk_size (int | None): Number of simulator models to return in each chunk. Defaults to yielding one simulator model a time. - simulator_external_ids (str | SequenceNotStr[str] | None): Filter by simulator external id(s). - sort (PropertySort | None): The criteria to sort by. - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + chunk_size: Number of simulator models to return in each chunk. Defaults to yielding one simulator model a time. + simulator_external_ids: Filter by simulator external id(s). + sort: The criteria to sort by. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. Yields: - SimulatorModel | SimulatorModelList: yields SimulatorModel one by one if chunk is not specified, else SimulatorModelList objects. + yields SimulatorModel one by one if chunk is not specified, else SimulatorModelList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.simulators.models( @@ -188,10 +188,10 @@ def create(self, items: SimulatorModelWrite | Sequence[SimulatorModelWrite]) -> `Create simulator models `_ Args: - items (SimulatorModelWrite | Sequence[SimulatorModelWrite]): The model(s) to create. + items: The model(s) to create. 
Returns: - SimulatorModel | SimulatorModelList: Created simulator model(s) + Created simulator model(s) Examples: Create new simulator models: @@ -220,8 +220,8 @@ def delete( `Delete simulator models `_ Args: - ids (int | Sequence[int] | None): id (or sequence of ids) for the model(s) to delete. - external_ids (str | SequenceNotStr[str] | None): external id (or sequence of external ids) for the model(s) to delete. + ids: id (or sequence of ids) for the model(s) to delete. + external_ids: external id (or sequence of external ids) for the model(s) to delete. Examples: Delete simulator models by id or external id: @@ -251,10 +251,10 @@ def update( `Update simulator models `_ Args: - items (SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate | Sequence[SimulatorModel | SimulatorModelWrite | SimulatorModelUpdate]): The model to update. + items: The model to update. Returns: - SimulatorModel | SimulatorModelList: Updated simulator model(s) + Updated simulator model(s) Examples: Update a simulator model that you have fetched. This will perform a full update of the model: diff --git a/cognite/client/_sync_api/simulators/models_revisions.py b/cognite/client/_sync_api/simulators/models_revisions.py index 09593107c8..1f9db28933 100644 --- a/cognite/client/_sync_api/simulators/models_revisions.py +++ b/cognite/client/_sync_api/simulators/models_revisions.py @@ -1,6 +1,6 @@ """ =============================================================================== -1d02275dfbb16469b08dc17b88cc9d14 +bece048c2005e7e4d74744d839c58bdd This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -49,15 +49,15 @@ def list( Retrieves a list of simulator model revisions that match the given criteria. Args: - limit (int): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. - sort (PropertySort | None): The criteria to sort by. 
- model_external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator models to filter by. - all_versions (bool | None): If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned. - created_time (TimestampRange | None): Filter by created time. - last_updated_time (TimestampRange | None): Filter by last updated time. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + sort: The criteria to sort by. + model_external_ids: The external ids of the simulator models to filter by. + all_versions: If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned. + created_time: Filter by created time. + last_updated_time: Filter by last updated time. Returns: - SimulatorModelRevisionList: List of simulator model revisions + List of simulator model revisions Examples: List simulator model revisions: @@ -110,11 +110,11 @@ def retrieve( Retrieve one or more simulator model revisions by ID(s) or external ID(s). Args: - ids (int | Sequence[int] | None): The ids of the simulator model revisions. - external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator model revisions. + ids: The ids of the simulator model revisions. + external_ids: The external ids of the simulator model revisions. Returns: - SimulatorModelRevision | SimulatorModelRevisionList | None: Requested simulator model revision(s) + Requested simulator model revision(s) Examples: Get simulator model revision by id: @@ -182,16 +182,16 @@ def __call__( Fetches simulator model revisions as they are iterated over, so you keep a limited number of simulator model revisions in memory. Args: - chunk_size (int | None): Number of simulator model revisions to return in each chunk. Defaults to yielding one simulator model revision a time. - sort (PropertySort | None): The criteria to sort by. 
- model_external_ids (str | SequenceNotStr[str] | None): The external ids of the simulator models to filter by. - all_versions (bool | None): If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned. - created_time (TimestampRange | None): Filter by created time. - last_updated_time (TimestampRange | None): Filter by last updated time. - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + chunk_size: Number of simulator model revisions to return in each chunk. Defaults to yielding one simulator model revision a time. + sort: The criteria to sort by. + model_external_ids: The external ids of the simulator models to filter by. + all_versions: If True, all versions of the simulator model revisions are returned. If False, only the latest version is returned. + created_time: Filter by created time. + last_updated_time: Filter by last updated time. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. Yields: - SimulatorModelRevision | SimulatorModelRevisionList: yields SimulatorModelRevision one by one if chunk is not specified, else SimulatorModelRevisionList objects. + yields SimulatorModelRevision one by one if chunk is not specified, else SimulatorModelRevisionList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.simulators.models.revisions( @@ -218,10 +218,10 @@ def create( `Create simulator model revisions `_ Args: - items (SimulatorModelRevisionWrite | Sequence[SimulatorModelRevisionWrite]): The model revision(s) to create. + items: The model revision(s) to create. 
Returns: - SimulatorModelRevision | SimulatorModelRevisionList: Created simulator model revision(s) + Created simulator model revision(s) Examples: Create new simulator model revisions: @@ -261,9 +261,9 @@ def retrieve_data(self, model_revision_external_id: str) -> SimulatorModelRevisi Retrieves a list of simulator model revisions data that match the given criteria. Args: - model_revision_external_id (str): The external id of the simulator model revision to filter by. + model_revision_external_id: The external id of the simulator model revision to filter by. Returns: - SimulatorModelRevisionDataList: List of simulator model revision data + List of simulator model revision data Examples: List simulator model revision data: diff --git a/cognite/client/_sync_api/simulators/routine_revisions.py b/cognite/client/_sync_api/simulators/routine_revisions.py index 0ce5d9f054..fc47479fcb 100644 --- a/cognite/client/_sync_api/simulators/routine_revisions.py +++ b/cognite/client/_sync_api/simulators/routine_revisions.py @@ -1,6 +1,6 @@ """ =============================================================================== -ff594df7d1b2db4d55aa175696b87157 +59e08b3e8ed6c3221b448ba843da4197 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -84,20 +84,20 @@ def __call__( Fetches simulator routine revisions as they are iterated over, so you keep a limited number of simulator routine revisions in memory. Args: - chunk_size (int | None): Number of simulator routine revisions to return in each chunk. Defaults to yielding one simulator routine revision a time. - routine_external_ids (SequenceNotStr[str] | None): Filter on routine external ids. - model_external_ids (SequenceNotStr[str] | None): Filter on model external ids. - simulator_integration_external_ids (SequenceNotStr[str] | None): Filter on simulator integration external ids. 
- simulator_external_ids (SequenceNotStr[str] | None): Filter on simulator external ids. - kind (Literal['long'] | None): Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' set to 'True' in the same query. - created_time (TimestampRange | None): Filter on created time. - all_versions (bool): If all versions of the routine should be returned. Defaults to false which only returns the latest version. - include_all_fields (bool): If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response. - limit (int | None): Maximum number of simulator routine revisions to return. Defaults to return all items. - sort (PropertySort | None): The criteria to sort by. + chunk_size: Number of simulator routine revisions to return in each chunk. Defaults to yielding one simulator routine revision a time. + routine_external_ids: Filter on routine external ids. + model_external_ids: Filter on model external ids. + simulator_integration_external_ids: Filter on simulator integration external ids. + simulator_external_ids: Filter on simulator external ids. + kind: Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' is set to 'True' in the same query. + created_time: Filter on created time. + all_versions: If all versions of the routine should be returned. Defaults to false which only returns the latest version. + include_all_fields: If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response. + limit: Maximum number of simulator routine revisions to return. Defaults to return all items. + sort: The criteria to sort by. Yields: - SimulatorRoutineRevision | SimulatorRoutineRevisionList: yields SimulatorRoutineRevision one by one if chunk is not specified, else SimulatorRoutineRevisionList objects. 
+ yields SimulatorRoutineRevision one by one if chunk is not specified, else SimulatorRoutineRevisionList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.simulators.routines.revisions( @@ -136,11 +136,11 @@ def retrieve( Retrieve simulator routine revisions by ID or External Id. Args: - ids (int | Sequence[int] | None): Simulator routine revision ID or list of IDs - external_ids (str | SequenceNotStr[str] | None): Simulator routine revision External ID or list of external IDs + ids: Simulator routine revision ID or list of IDs + external_ids: Simulator routine revision External ID or list of external IDs Returns: - SimulatorRoutineRevision | SimulatorRoutineRevisionList | None: Requested simulator routine revision + Requested simulator routine revision Examples: Get simulator routine revision by id: @@ -171,10 +171,10 @@ def create( `Create simulator routine revisions `_ Args: - items (SimulatorRoutineRevisionWrite | Sequence[SimulatorRoutineRevisionWrite]): Simulator routine revisions to create. + items: Simulator routine revisions to create. Returns: - SimulatorRoutineRevision | SimulatorRoutineRevisionList: Created simulator routine revision(s) + Created simulator routine revision(s) Examples: Create new simulator routine revisions: @@ -301,19 +301,19 @@ def list( Retrieves a list of simulator routine revisions that match the given criteria. Args: - routine_external_ids (SequenceNotStr[str] | None): Filter on routine external ids. - model_external_ids (SequenceNotStr[str] | None): Filter on model external ids. - simulator_integration_external_ids (SequenceNotStr[str] | None): Filter on simulator integration external ids. - simulator_external_ids (SequenceNotStr[str] | None): Filter on simulator external ids. - kind (Literal['long'] | None): Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' set to 'True' in the same query. - created_time (TimestampRange | None): Filter on created time. 
- all_versions (bool): If all versions of the routine should be returned. Defaults to false which only returns the latest version. - include_all_fields (bool): If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response. - limit (int | None): Maximum number of simulator routine revisions to return. Defaults to return all items. - sort (PropertySort | None): The criteria to sort by. + routine_external_ids: Filter on routine external ids. + model_external_ids: Filter on model external ids. + simulator_integration_external_ids: Filter on simulator integration external ids. + simulator_external_ids: Filter on simulator external ids. + kind: Filter by routine kind. Note that this filter cannot be applied when 'include_all_fields' is set to 'True' in the same query. + created_time: Filter on created time. + all_versions: If all versions of the routine should be returned. Defaults to false which only returns the latest version. + include_all_fields: If all fields should be included in the response. Defaults to false which does not include script, configuration.inputs and configuration.outputs in the response. + limit: Maximum number of simulator routine revisions to return. Defaults to return all items. + sort: The criteria to sort by. 
Returns: - SimulatorRoutineRevisionList: List of simulator routine revisions + List of simulator routine revisions Examples: List simulator routine revisions: diff --git a/cognite/client/_sync_api/simulators/routines.py b/cognite/client/_sync_api/simulators/routines.py index fab7527db0..41145540ca 100644 --- a/cognite/client/_sync_api/simulators/routines.py +++ b/cognite/client/_sync_api/simulators/routines.py @@ -1,6 +1,6 @@ """ =============================================================================== -74ca95fcd30f975d96feaf749e9ebb57 +a258cfdf0826032111b578c59c419d6c This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -69,14 +69,14 @@ def __call__( Fetches simulator routines as they are iterated over, so you keep a limited number of simulator routines in memory. Args: - chunk_size (int | None): Number of simulator routines to return in each chunk. Defaults to yielding one simulator routine a time. - model_external_ids (Sequence[str] | None): Filter on model external ids. - simulator_integration_external_ids (Sequence[str] | None): Filter on simulator integration external ids. - sort (PropertySort | None): The criteria to sort by. - limit (int | None): Maximum number of simulator routines to return. Defaults to return all items. + chunk_size: Number of simulator routines to return in each chunk. Defaults to yielding one simulator routine a time. + model_external_ids: Filter on model external ids. + simulator_integration_external_ids: Filter on simulator integration external ids. + sort: The criteria to sort by. + limit: Maximum number of simulator routines to return. Defaults to return all items. Yields: - SimulatorRoutine | SimulatorRoutineList: yields SimulatorRoutine one by one if chunk is not specified, else SimulatorRoutineList objects. + yields SimulatorRoutine one by one if chunk is not specified, else SimulatorRoutineList objects. 
""" # noqa: DOC404 yield from SyncIterator( self.__async_client.simulators.routines( @@ -101,10 +101,10 @@ def create( `Create simulator routines `_ Args: - routine (SimulatorRoutineWrite | Sequence[SimulatorRoutineWrite]): Simulator routine(s) to create. + routine: Simulator routine(s) to create. Returns: - SimulatorRoutine | SimulatorRoutineList: Created simulator routine(s) + Created simulator routine(s) Examples: Create new simulator routines: @@ -140,8 +140,8 @@ def delete( `Delete simulator routines `_ Args: - ids (int | Sequence[int] | None): ids (or sequence of ids) for the routine(s) to delete. - external_ids (str | SequenceNotStr[str] | SequenceNotStr[str] | None): external ids (or sequence of external ids) for the routine(s) to delete. + ids: ids (or sequence of ids) for the routine(s) to delete. + external_ids: external ids (or sequence of external ids) for the routine(s) to delete. Examples: Delete simulator routines by id or external id: @@ -166,14 +166,14 @@ def list( Retrieves a list of simulator routines that match the given criteria. Args: - limit (int): Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. - model_external_ids (Sequence[str] | None): Filter on model external ids. - simulator_integration_external_ids (Sequence[str] | None): Filter on simulator integration external ids. - kind (Literal['long'] | None): Filter on routine kind. - sort (PropertySort | None): The criteria to sort by. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float(“inf”) or None to return all items. + model_external_ids: Filter on model external ids. + simulator_integration_external_ids: Filter on simulator integration external ids. + kind: Filter on routine kind. + sort: The criteria to sort by. Returns: - SimulatorRoutineList: List of simulator routines + List of simulator routines Examples: List simulator routines: @@ -258,21 +258,18 @@ def run( 2. 
By routine revision external ID + model revision external ID Args: - routine_external_id (str | None): External id of the simulator routine to run. - Cannot be specified together with routine_revision_external_id and model_revision_external_id. - routine_revision_external_id (str | None): External id of the simulator routine revision to run. - Must be specified together with model_revision_external_id. - model_revision_external_id (str | None): External id of the simulator model revision. - Must be specified together with routine_revision_external_id. - inputs (Sequence[SimulationInputOverride] | None): List of input overrides - run_time (int | None): Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling. - queue (bool | None): Queue the simulation run when connector is down. - log_severity (Literal['Debug', 'Information', 'Warning', 'Error'] | None): Override the minimum severity level for the simulation run logs. If not provided, the minimum severity is read from the connector logger configuration. - wait (bool): Wait until the simulation run is finished. Defaults to True. - timeout (float): Timeout in seconds for waiting for the simulation run to finish. Defaults to 60 seconds. + routine_external_id: External id of the simulator routine to run. Cannot be specified together with routine_revision_external_id and model_revision_external_id. + routine_revision_external_id: External id of the simulator routine revision to run. Must be specified together with model_revision_external_id. + model_revision_external_id: External id of the simulator model revision. Must be specified together with routine_revision_external_id. + inputs: List of input overrides + run_time: Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling. + queue: Queue the simulation run when connector is down. + log_severity: Override the minimum severity level for the simulation run logs. 
If not provided, the minimum severity is read from the connector logger configuration. + wait: Wait until the simulation run is finished. Defaults to True. + timeout: Timeout in seconds for waiting for the simulation run to finish. Defaults to 60 seconds. Returns: - SimulationRun: Created simulation run + Created simulation run Examples: Create new simulation run using routine external ID: diff --git a/cognite/client/_sync_api/simulators/runs.py b/cognite/client/_sync_api/simulators/runs.py index 5f687a8491..45af7bd629 100644 --- a/cognite/client/_sync_api/simulators/runs.py +++ b/cognite/client/_sync_api/simulators/runs.py @@ -1,6 +1,6 @@ """ =============================================================================== -627f42fbdbc933e30799270aba58ef0d +2da2c3b09d7047644784b2bbe2cf2f04 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -92,22 +92,22 @@ def __call__( Fetches simulation runs as they are iterated over, so you keep a limited number of simulation runs in memory. Args: - chunk_size (int | None): Number of simulation runs to return in each chunk. Defaults to yielding one simulation run a time. - limit (int | None): The maximum number of simulation runs to return, pass None to return all. 
- status (str | None): Filter by simulation run status - run_type (str | None): Filter by simulation run type - model_external_ids (SequenceNotStr[str] | None): Filter by simulator model external ids - simulator_integration_external_ids (SequenceNotStr[str] | None): Filter by simulator integration external ids - simulator_external_ids (SequenceNotStr[str] | None): Filter by simulator external ids - routine_external_ids (SequenceNotStr[str] | None): Filter by routine external ids - routine_revision_external_ids (SequenceNotStr[str] | None): Filter by routine revision external ids - model_revision_external_ids (SequenceNotStr[str] | None): Filter by model revision external ids - created_time (TimestampRange | None): Filter by created time - simulation_time (TimestampRange | None): Filter by simulation time - sort (SimulationRunsSort | None): The criteria to sort by. + chunk_size: Number of simulation runs to return in each chunk. Defaults to yielding one simulation run a time. + limit: The maximum number of simulation runs to return, pass None to return all. + status: Filter by simulation run status + run_type: Filter by simulation run type + model_external_ids: Filter by simulator model external ids + simulator_integration_external_ids: Filter by simulator integration external ids + simulator_external_ids: Filter by simulator external ids + routine_external_ids: Filter by routine external ids + routine_revision_external_ids: Filter by routine revision external ids + model_revision_external_ids: Filter by model revision external ids + created_time: Filter by created time + simulation_time: Filter by simulation time + sort: The criteria to sort by. Yields: - SimulationRun | SimulationRunList: yields Simulation Run one by one if chunk is not specified, else SimulatorRunsList objects. + yields SimulationRun one by one if chunk is not specified, else SimulationRunList objects. 
""" # noqa: DOC404 yield from SyncIterator( self.__async_client.simulators.runs( @@ -148,21 +148,21 @@ def list( Retrieves a list of simulation runs that match the given criteria. Args: - limit (int | None): The maximum number of simulation runs to return, pass None to return all. - status (str | None): Filter by simulation run status - run_type (str | None): Filter by simulation run type - model_external_ids (SequenceNotStr[str] | None): Filter by simulator model external ids - simulator_integration_external_ids (SequenceNotStr[str] | None): Filter by simulator integration external ids - simulator_external_ids (SequenceNotStr[str] | None): Filter by simulator external ids - routine_external_ids (SequenceNotStr[str] | None): Filter by routine external ids - routine_revision_external_ids (SequenceNotStr[str] | None): Filter by routine revision external ids - model_revision_external_ids (SequenceNotStr[str] | None): Filter by model revision external ids - created_time (TimestampRange | None): Filter by created time - simulation_time (TimestampRange | None): Filter by simulation time - sort (SimulationRunsSort | None): The criteria to sort by. + limit: The maximum number of simulation runs to return, pass None to return all. + status: Filter by simulation run status + run_type: Filter by simulation run type + model_external_ids: Filter by simulator model external ids + simulator_integration_external_ids: Filter by simulator integration external ids + simulator_external_ids: Filter by simulator external ids + routine_external_ids: Filter by routine external ids + routine_revision_external_ids: Filter by routine revision external ids + model_revision_external_ids: Filter by model revision external ids + created_time: Filter by created time + simulation_time: Filter by simulation time + sort: The criteria to sort by. 
Returns: - SimulationRunList: List of simulation runs + List of simulation runs Examples: List simulation runs: @@ -216,10 +216,10 @@ def retrieve(self, ids: int | Sequence[int]) -> SimulationRun | SimulationRunLis `Retrieve simulation runs by ID `_ Args: - ids (int | Sequence[int]): The ID(s) of the simulation run(s) to retrieve. + ids: The ID(s) of the simulation run(s) to retrieve. Returns: - SimulationRun | SimulationRunList | None: The simulation run(s) with the given ID(s) + The simulation run(s) with the given ID(s) Examples: Retrieve a single simulation run by id: @@ -241,10 +241,10 @@ def create(self, items: SimulationRunWrite | Sequence[SimulationRunWrite]) -> Si `Create simulation runs `_ Args: - items (SimulationRunWrite | Sequence[SimulationRunWrite]): The simulation run(s) to execute. + items: The simulation run(s) to execute. Returns: - SimulationRun | SimulationRunList: Created simulation run(s) + Created simulation run(s) Examples: Create new simulation run: @@ -270,10 +270,10 @@ def list_run_data(self, run_id: int) -> SimulationRunDataList: Retrieve data associated with a simulation run by ID. Args: - run_id (int): Simulation run id. + run_id: Simulation run id. Returns: - SimulationRunDataList: List of simulation run data + List of simulation run data Examples: Get simulation run data by run id: diff --git a/cognite/client/_sync_api/synthetic_time_series.py b/cognite/client/_sync_api/synthetic_time_series.py index 782eadc574..45307f6886 100644 --- a/cognite/client/_sync_api/synthetic_time_series.py +++ b/cognite/client/_sync_api/synthetic_time_series.py @@ -1,6 +1,6 @@ """ =============================================================================== -c30ea9cd2b7de10dc2a40c7c1065ef4a +d5dfa84b4d5e61943efef506246e1da8 This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -79,21 +79,19 @@ def query( You can read the guide to synthetic time series in our `documentation `_. Args: - expressions (str | sympy.Basic | Sequence[str] | Sequence[sympy.Basic]): Functions to be calculated. Supports both strings and sympy expressions. Strings can have either the API `ts{}` syntax, or contain variable names to be replaced using the `variables` parameter. - start (int | str | datetime.datetime): Inclusive start. - end (int | str | datetime.datetime): Exclusive end. - limit (int | None): Number of datapoints per expression to retrieve. - variables (Mapping[str | sympy.Symbol, str | NodeId | TimeSeries | TimeSeriesWrite] | None): An optional map of symbol replacements. - aggregate (str | None): use this aggregate when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax. - granularity (str | None): use this granularity with the aggregate. - target_unit (str | None): use this target_unit when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax. - target_unit_system (str | None): Same as target_unit, but with unit system (e.g. SI). Only one of target_unit and target_unit_system can be specified. - timezone (str | datetime.timezone | ZoneInfo | None): The timezone to use when aggregating datapoints. For aggregates of granularity 'hour' and longer, - which time zone should we align to. Align to the start of the hour, start of the day or start of the month. For time zones of type Region/Location, - the aggregate duration can vary, typically due to daylight saving time. For time zones of type UTC+/-HH:MM, use increments of 15 minutes. Default: "UTC" (None) + expressions: Functions to be calculated. Supports both strings and sympy expressions. Strings can have either the API `ts{}` syntax, or contain variable names to be replaced using the `variables` parameter. + start: Inclusive start. 
+ end: Exclusive end. + limit: Number of datapoints per expression to retrieve. + variables: An optional map of symbol replacements. + aggregate: use this aggregate when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax. + granularity: use this granularity with the aggregate. + target_unit: use this target_unit when replacing entries from `variables`, does not affect time series given in the `ts{}` syntax. + target_unit_system: Same as target_unit, but with unit system (e.g. SI). Only one of target_unit and target_unit_system can be specified. + timezone: The timezone to use when aggregating datapoints. For aggregates of granularity 'hour' and longer, which time zone should we align to. Align to the start of the hour, start of the day or start of the month. For time zones of type Region/Location, the aggregate duration can vary, typically due to daylight saving time. For time zones of type UTC+/-HH:MM, use increments of 15 minutes. Default: "UTC" (None) Returns: - Datapoints | DatapointsList: A DatapointsList object containing the calculated data. + A DatapointsList object containing the calculated data. Examples: diff --git a/cognite/client/_sync_api/three_d/asset_mapping.py b/cognite/client/_sync_api/three_d/asset_mapping.py index 7a90cbee18..36cbad23e1 100644 --- a/cognite/client/_sync_api/three_d/asset_mapping.py +++ b/cognite/client/_sync_api/three_d/asset_mapping.py @@ -1,6 +1,6 @@ """ =============================================================================== -de9740b6eea727c35dd4ce23a4e3e069 +873c2427dd138d36132ca89077d1ed66 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -41,15 +41,15 @@ def list( `List 3D node asset mappings. `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - node_id (int | None): List only asset mappings associated with this node. 
- asset_id (int | None): List only asset mappings associated with this asset. - intersects_bounding_box (BoundingBox3D | None): If given, only return asset mappings for assets whose bounding box intersects with the given bounding box. - limit (int | None): Maximum number of asset mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + model_id: Id of the model. + revision_id: Id of the revision. + node_id: List only asset mappings associated with this node. + asset_id: List only asset mappings associated with this asset. + intersects_bounding_box: If given, only return asset mappings for assets whose bounding box intersects with the given bounding box. + limit: Maximum number of asset mappings to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ThreeDAssetMappingList: The list of asset mappings. + The list of asset mappings. Example: @@ -104,12 +104,12 @@ def create( `Create 3d node asset mappings. `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - asset_mapping (ThreeDAssetMapping | ThreeDAssetMappingWrite | Sequence[ThreeDAssetMapping] | Sequence[ThreeDAssetMappingWrite]): The asset mapping(s) to create. + model_id: Id of the model. + revision_id: Id of the revision. + asset_mapping: The asset mapping(s) to create. Returns: - ThreeDAssetMapping | ThreeDAssetMappingList: The created asset mapping(s). + The created asset mapping(s). Example: @@ -135,9 +135,9 @@ def delete( `Delete 3d node asset mappings. `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - asset_mapping (ThreeDAssetMapping | Sequence[ThreeDAssetMapping]): The asset mapping(s) to delete. + model_id: Id of the model. + revision_id: Id of the revision. + asset_mapping: The asset mapping(s) to delete. 
Example: diff --git a/cognite/client/_sync_api/three_d/files.py b/cognite/client/_sync_api/three_d/files.py index c1d06e3044..7c48dd17a7 100644 --- a/cognite/client/_sync_api/three_d/files.py +++ b/cognite/client/_sync_api/three_d/files.py @@ -1,6 +1,6 @@ """ =============================================================================== -9d5783639b1973653734c4675ef36539 +584e82685934a74ab25d96ac19a59a4a This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -23,10 +23,10 @@ def retrieve(self, id: int) -> bytes: `Retrieve the contents of a 3d file by id. `_ Args: - id (int): The id of the file to retrieve. + id: The id of the file to retrieve. Returns: - bytes: The contents of the file. + The contents of the file. Example: diff --git a/cognite/client/_sync_api/three_d/models.py b/cognite/client/_sync_api/three_d/models.py index f98cfe65f1..deaa809cc7 100644 --- a/cognite/client/_sync_api/three_d/models.py +++ b/cognite/client/_sync_api/three_d/models.py @@ -1,6 +1,6 @@ """ =============================================================================== -548e0609338f18fa22fa7745a576bd52 +c971288b7ad55e15a90c90580f3a572f This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -43,12 +43,12 @@ def __call__( Fetches 3d models as they are iterated over, so you keep a limited number of 3d models in memory. Args: - chunk_size (int | None): Number of 3d models to return in each chunk. Defaults to yielding one model a time. - published (bool | None): Filter based on whether or not the model has published revisions. - limit (int | None): Maximum number of 3d models to return. Defaults to return all items. + chunk_size: Number of 3d models to return in each chunk. Defaults to yielding one model at a time.
+ published: Filter based on whether or not the model has published revisions. + limit: Maximum number of 3d models to return. Defaults to return all items. Yields: - ThreeDModel | ThreeDModelList: yields ThreeDModel one by one if chunk is not specified, else ThreeDModelList objects. + yields ThreeDModel one by one if chunk is not specified, else ThreeDModelList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.three_d.models(chunk_size=chunk_size, published=published, limit=limit) @@ -59,10 +59,10 @@ def retrieve(self, id: int) -> ThreeDModel | None: `Retrieve a 3d model by id `_ Args: - id (int): Get the model with this id. + id: Get the model with this id. Returns: - ThreeDModel | None: The requested 3d model. + The requested 3d model. Example: @@ -80,11 +80,11 @@ def list(self, published: bool | None = None, limit: int | None = DEFAULT_LIMIT_ `List 3d models. `_ Args: - published (bool | None): Filter based on whether or not the model has published revisions. - limit (int | None): Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. + published: Filter based on whether or not the model has published revisions. + limit: Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ThreeDModelList: The list of 3d models. + The list of 3d models. Examples: @@ -130,14 +130,12 @@ def create( `Create new 3d models. `_ Args: - name (str | ThreeDModelWrite | SequenceNotStr[str | ThreeDModelWrite]): The name of the 3d model(s) or 3D - model object to create. If a 3D model object is provided, the other arguments are ignored. - data_set_id (int | None): The id of the dataset this 3D model belongs to. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. - Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. 
+ name: The name of the 3d model(s) or 3D model object to create. If a 3D model object is provided, the other arguments are ignored. + data_set_id: The id of the dataset this 3D model belongs to. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. Returns: - ThreeDModel | ThreeDModelList: The created 3d model(s). + The created 3d model(s). Example: @@ -185,11 +183,11 @@ def update( `Update 3d models. `_ Args: - item (ThreeDModel | ThreeDModelUpdate | Sequence[ThreeDModel | ThreeDModelUpdate]): ThreeDModel(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ThreeDModel or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: ThreeDModel(s) to update + mode: How to update data when a non-update object is given (ThreeDModel or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - ThreeDModel | ThreeDModelList: Updated ThreeDModel(s) + Updated ThreeDModel(s) Examples: @@ -215,7 +213,7 @@ def delete(self, id: int | Sequence[int]) -> None: `Delete 3d models. `_ Args: - id (int | Sequence[int]): ID or list of IDs to delete. + id: ID or list of IDs to delete. 
Example: diff --git a/cognite/client/_sync_api/three_d/revisions.py b/cognite/client/_sync_api/three_d/revisions.py index 15b245fe9b..420b2f2f3f 100644 --- a/cognite/client/_sync_api/three_d/revisions.py +++ b/cognite/client/_sync_api/three_d/revisions.py @@ -1,6 +1,6 @@ """ =============================================================================== -edd0706bfc08e69741933f0ef59dec72 +cc358eab067a2c063e28b6128b2b1abf This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -45,13 +45,13 @@ def __call__( Fetches 3d model revisions as they are iterated over, so you keep a limited number of 3d model revisions in memory. Args: - model_id (int): Iterate over revisions for the model with this id. - chunk_size (int | None): Number of 3d model revisions to return in each chunk. Defaults to yielding one model a time. - published (bool): Filter based on whether or not the revision has been published. - limit (int | None): Maximum number of 3d model revisions to return. Defaults to return all items. + model_id: Iterate over revisions for the model with this id. + chunk_size: Number of 3d model revisions to return in each chunk. Defaults to yielding one model at a time. + published: Filter based on whether or not the revision has been published. + limit: Maximum number of 3d model revisions to return. Defaults to return all items. Yields: - ThreeDModelRevision | ThreeDModelRevisionList: yields ThreeDModelRevision one by one if chunk is not specified, else ThreeDModelRevisionList objects. + yields ThreeDModelRevision one by one if chunk is not specified, else ThreeDModelRevisionList objects.
""" # noqa: DOC404 yield from SyncIterator( self.__async_client.three_d.revisions( @@ -67,11 +67,11 @@ def retrieve(self, model_id: int, id: int) -> ThreeDModelRevision | None: `Retrieve a 3d model revision by id `_ Args: - model_id (int): Get the revision under the model with this id. - id (int): Get the model revision with this id. + model_id: Get the revision under the model with this id. + id: Get the model revision with this id. Returns: - ThreeDModelRevision | None: The requested 3d model revision. + The requested 3d model revision. Example: @@ -106,11 +106,11 @@ def create( `Create a revisions for a specified 3d model. `_ Args: - model_id (int): Create revisions for this model. - revision (ThreeDModelRevision | ThreeDModelRevisionWrite | Sequence[ThreeDModelRevision] | Sequence[ThreeDModelRevisionWrite]): The revision(s) to create. + model_id: Create revisions for this model. + revision: The revision(s) to create. Returns: - ThreeDModelRevision | ThreeDModelRevisionList: The created revision(s) + The created revision(s) Example: @@ -132,12 +132,12 @@ def list( `List 3d model revisions. `_ Args: - model_id (int): List revisions under the model with this id. - published (bool): Filter based on whether or not the revision is published. - limit (int | None): Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. + model_id: List revisions under the model with this id. + published: Filter based on whether or not the revision is published. + limit: Maximum number of models to retrieve. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ThreeDModelRevisionList: The list of 3d model revisions. + The list of 3d model revisions. Example: @@ -162,12 +162,12 @@ def update( `Update 3d model revisions. `_ Args: - model_id (int): Update the revision under the model with this id. 
- item (ThreeDModelRevision | ThreeDModelRevisionUpdate | Sequence[ThreeDModelRevision | ThreeDModelRevisionUpdate]): ThreeDModelRevision(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (ThreeDModelRevision or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + model_id: Update the revision under the model with this id. + item: ThreeDModelRevision(s) to update + mode: How to update data when a non-update object is given (ThreeDModelRevision or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - ThreeDModelRevision | ThreeDModelRevisionList: Updated ThreeDModelRevision(s) + Updated ThreeDModelRevision(s) Examples: @@ -193,8 +193,8 @@ def delete(self, model_id: int, id: int | Sequence[int]) -> None: `Delete 3d model revisions. `_ Args: - model_id (int): Delete the revision under the model with this id. - id (int | Sequence[int]): ID or list of IDs to delete. + model_id: Delete the revision under the model with this id. + id: ID or list of IDs to delete. Example: @@ -212,9 +212,9 @@ def update_thumbnail(self, model_id: int, revision_id: int, file_id: int) -> Non `Update a revision thumbnail. `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. 
- file_id (int): Id of the thumbnail file in the Files API. + model_id: Id of the model. + revision_id: Id of the revision. + file_id: Id of the thumbnail file in the Files API. Example: @@ -248,16 +248,16 @@ def list_nodes( the resulting subtree with the 'depth' query parameter. Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - node_id (int | None): ID of the root node of the subtree you request (default is the root node). - depth (int | None): Get sub nodes up to this many levels below the specified node. Depth 0 is the root node. - sort_by_node_id (bool): Returns the nodes in `nodeId` order. - partitions (int | None): The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`. - limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + model_id: Id of the model. + revision_id: Id of the revision. + node_id: ID of the root node of the subtree you request (default is the root node). + depth: Get sub nodes up to this many levels below the specified node. Depth 0 is the root node. + sort_by_node_id: Returns the nodes in `nodeId` order. + partitions: The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`. + limit: Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ThreeDNodeList: The list of 3d nodes. + The list of 3d nodes. Example: @@ -292,14 +292,14 @@ def filter_nodes( `List nodes in a revision, filtered by node property values. `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - properties (dict[str, dict[str, SequenceNotStr[str]]] | None): Properties for filtering. The object contains one or more category. Each category references one or more properties. Each property is associated with a list of values. 
For a node to satisfy the filter, it must, for each category/property in the filter, contain the category+property combination with a value that is contained within the corresponding list in the filter. - limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - partitions (int | None): The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`. + model_id: Id of the model. + revision_id: Id of the revision. + properties: Properties for filtering. The object contains one or more category. Each category references one or more properties. Each property is associated with a list of values. For a node to satisfy the filter, it must, for each category/property in the filter, contain the category+property combination with a value that is contained within the corresponding list in the filter. + limit: Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + partitions: The result is retrieved in this many parts in parallel. Requires `sort_by_node_id` to be set to `true`. Returns: - ThreeDNodeList: The list of 3d nodes. + The list of 3d nodes. Example: @@ -323,13 +323,13 @@ def list_ancestor_nodes( `Retrieves a list of ancestor nodes of a given node, including itself, in the hierarchy of the 3D model `_ Args: - model_id (int): Id of the model. - revision_id (int): Id of the revision. - node_id (int | None): ID of the node to get the ancestors of. - limit (int | None): Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + model_id: Id of the model. + revision_id: Id of the revision. + node_id: ID of the node to get the ancestors of. + limit: Maximum number of nodes to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - ThreeDNodeList: The list of 3d nodes. + The list of 3d nodes. 
Example: diff --git a/cognite/client/_sync_api/time_series.py b/cognite/client/_sync_api/time_series.py index 0161d34488..932824aac9 100644 --- a/cognite/client/_sync_api/time_series.py +++ b/cognite/client/_sync_api/time_series.py @@ -1,6 +1,6 @@ """ =============================================================================== -244b8f18467f4c17297f8ccf14dc127e +26f14757e2571b3eb1bd9d112aa90a5c This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -118,29 +118,29 @@ def __call__( Fetches time series as they are iterated over, so you keep a limited number of objects in memory. Args: - chunk_size (int | None): Number of time series to return in each chunk. Defaults to yielding one time series a time. - name (str | None): Name of the time series. Often referred to as tag. - unit (str | None): Unit of the time series. - unit_external_id (str | None): Filter on unit external ID. - unit_quantity (str | None): Filter on unit quantity. - is_string (bool | None): Whether the time series is a string time series. - is_step (bool | None): Whether the time series is a step (piecewise constant) time series. - asset_ids (Sequence[int] | None): List time series related to these assets. - asset_external_ids (SequenceNotStr[str] | None): List time series related to these assets. - asset_subtree_ids (int | Sequence[int] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. 
- data_set_ids (int | Sequence[int] | None): Return only time series in the specified data set(s) with this id / these ids. - data_set_external_ids (str | SequenceNotStr[str] | None): Return only time series in the specified data set(s) with this external id / these external ids. - metadata (dict[str, Any] | None): Custom, application specific metadata. String key -> String value - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - limit (int | None): Maximum number of time series to return. Defaults to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. - sort (SortSpec | list[SortSpec] | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + chunk_size: Number of time series to return in each chunk. Defaults to yielding one time series at a time. + name: Name of the time series. Often referred to as tag. + unit: Unit of the time series. + unit_external_id: Filter on unit external ID. + unit_quantity: Filter on unit quantity. + is_string: Whether the time series is a string time series. + is_step: Whether the time series is a step (piecewise constant) time series. + asset_ids: List time series related to these assets. + asset_external_ids: List time series related to these assets. + asset_subtree_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetIds.
If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only time series in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only time series in the specified data set(s) with this external id / these external ids. + metadata: Custom, application specific metadata. String key -> String value + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit: Maximum number of time series to return. Defaults to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. Yields: - TimeSeries | TimeSeriesList: yields TimeSeries one by one if chunk_size is not specified, else TimeSeriesList objects. + yields TimeSeries one by one if chunk_size is not specified, else TimeSeriesList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.time_series( @@ -174,12 +174,12 @@ def retrieve( `Retrieve a single time series by id. 
`_ Args: - id (int | None): ID - external_id (str | None): External ID - instance_id (NodeId | None): Instance ID + id: ID + external_id: External ID + instance_id: Instance ID Returns: - TimeSeries | None: Requested time series or None if it does not exist. + Requested time series or None if it does not exist. Examples: @@ -209,13 +209,13 @@ def retrieve_multiple( `Retrieve multiple time series by id. `_ Args: - ids (Sequence[int] | None): IDs - external_ids (SequenceNotStr[str] | None): External IDs - instance_ids (Sequence[NodeId] | None): Instance IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: IDs + external_ids: External IDs + instance_ids: Instance IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - TimeSeriesList: The requested time series. + The requested time series. Examples: @@ -245,11 +245,11 @@ def aggregate_count( `Count of time series matching the specified filters and search. `_ Args: - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count. - filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down time series to count requiring exact match. + advanced_filter: The filter to narrow down the time series to count. + filter: The filter to narrow down time series to count requiring exact match. Returns: - int: The number of time series matching the specified filters and search. + The number of time series matching the specified filters and search. Examples: @@ -280,12 +280,12 @@ def aggregate_cardinality_values( `Find approximate property count for time series. `_ Args: - property (TimeSeriesProperty | str | list[str]): The property to count the cardinality of. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. 
- aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + property: The property to count the cardinality of. + advanced_filter: The filter to narrow down the time series to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the time series to count requiring exact match. Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. Examples: @@ -326,12 +326,12 @@ def aggregate_cardinality_properties( `Find approximate paths count for time series. `_ Args: - path (TimeSeriesProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the time series to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the time series to count requiring exact match. Returns: - int: The number of properties matching the specified filters and search. + The number of properties matching the specified filters and search. 
Examples: @@ -360,13 +360,13 @@ def aggregate_unique_values( `Get unique properties with counts for time series. `_ Args: - property (TimeSeriesProperty | str | list[str]): The property to group by. - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + property: The property to group by. + advanced_filter: The filter to narrow down the time series to count cardinality. + aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the time series to count requiring exact match. Returns: - UniqueResultList: List of unique values of time series matching the specified filters and search. + List of unique values of time series matching the specified filters and search. Examples: @@ -416,13 +416,13 @@ def aggregate_unique_properties( `Get unique paths with counts for time series. `_ Args: - path (TimeSeriesProperty | str | list[str]): The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). - advanced_filter (Filter | dict[str, Any] | None): The filter to narrow down the time series to count cardinality. - aggregate_filter (AggregationFilter | dict[str, Any] | None): The filter to apply to the resulting buckets. - filter (TimeSeriesFilter | dict[str, Any] | None): The filter to narrow down the time series to count requiring exact match. + path: The scope in every document to aggregate properties. The only value allowed now is ["metadata"]. It means to aggregate only metadata properties (aka keys). + advanced_filter: The filter to narrow down the time series to count cardinality. 
+ aggregate_filter: The filter to apply to the resulting buckets. + filter: The filter to narrow down the time series to count requiring exact match. Returns: - UniqueResultList: List of unique values of time series matching the specified filters and search. + List of unique values of time series matching the specified filters and search. Examples: @@ -453,10 +453,10 @@ def create( `Create one or more time series. `_ Args: - time_series (TimeSeries | TimeSeriesWrite | Sequence[TimeSeries] | Sequence[TimeSeriesWrite]): TimeSeries or list of TimeSeries to create. + time_series: TimeSeries or list of TimeSeries to create. Returns: - TimeSeries | TimeSeriesList: The created time series. + The created time series. Examples: @@ -480,9 +480,9 @@ def delete( `Delete one or more time series. `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: External ID or list of external ids + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Examples: @@ -525,11 +525,11 @@ def update( `Update one or more time series. `_ Args: - item (TimeSeries | TimeSeriesWrite | TimeSeriesUpdate | Sequence[TimeSeries | TimeSeriesWrite | TimeSeriesUpdate]): Time series to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (TimeSeries or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. 
+ item: Time series to update + mode: How to update data when a non-update object is given (TimeSeries or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. Returns: - TimeSeries | TimeSeriesList: Updated time series. + Updated time series. Examples: @@ -583,11 +583,11 @@ def upsert( For more details, see :ref:`appendix-upsert`. Args: - item (TimeSeries | TimeSeriesWrite | Sequence[TimeSeries | TimeSeriesWrite]): TimeSeries or list of TimeSeries to upsert. - mode (Literal['patch', 'replace']): Whether to patch or replace in the case the time series are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. + item: TimeSeries or list of TimeSeries to upsert. + mode: Whether to patch or replace in the case the time series are existing. If you set 'patch', the call will only update fields with non-null values (default). Setting 'replace' will unset any fields that are not specified. Returns: - TimeSeries | TimeSeriesList: The upserted time series(s). + The upserted time series(s). Examples: @@ -617,14 +617,14 @@ def search( Primarily meant for human-centric use-cases and data exploration, not for programs, since matching and ordering may change over time. Use the `list` function if stable or exact matches are required. Args: - name (str | None): Prefix and fuzzy search on name. - description (str | None): Prefix and fuzzy search on description. - query (str | None): Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. 
Example: 'some other' - filter (TimeSeriesFilter | dict[str, Any] | None): Filter to apply. Performs exact match on these fields. - limit (int): Max number of results to return. + name: Prefix and fuzzy search on name. + description: Prefix and fuzzy search on description. + query: Search on name and description using wildcard search on each of the words (separated by spaces). Retrieves results where at least one word must match. Example: 'some other' + filter: Filter to apply. Performs exact match on these fields. + limit: Max number of results to return. Returns: - TimeSeriesList: List of requested time series. + List of requested time series. Examples: @@ -672,29 +672,29 @@ def list( `List time series `_ Args: - name (str | None): Name of the time series. Often referred to as tag. - unit (str | None): Unit of the time series. - unit_external_id (str | None): Filter on unit external ID. - unit_quantity (str | None): Filter on unit quantity. - is_string (bool | None): Whether the time series is a string time series. - is_step (bool | None): Whether the time series is a step (piecewise constant) time series. - asset_ids (Sequence[int] | None): List time series related to these assets. - asset_external_ids (SequenceNotStr[str] | None): List time series related to these assets. - asset_subtree_ids (int | Sequence[int] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - asset_subtree_external_ids (str | SequenceNotStr[str] | None): Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (int | Sequence[int] | None): Return only time series in the specified data set(s) with this id / these ids. 
- data_set_external_ids (str | SequenceNotStr[str] | None): Return only time series in the specified data set(s) with this external id / these external ids. - metadata (dict[str, Any] | None): Custom, application specific metadata. String key -> String value - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - created_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - last_updated_time (dict[str, Any] | None): Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. - partitions (int | None): Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). - limit (int | None): Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items. - advanced_filter (Filter | dict[str, Any] | None): Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. - sort (SortSpec | list[SortSpec] | TimeSeriesProperty | None): The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. + name: Name of the time series. Often referred to as tag. + unit: Unit of the time series. + unit_external_id: Filter on unit external ID. + unit_quantity: Filter on unit quantity. + is_string: Whether the time series is a string time series. + is_step: Whether the time series is a step (piecewise constant) time series. + asset_ids: List time series related to these assets. + asset_external_ids: List time series related to these assets. 
+ asset_subtree_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + asset_subtree_external_ids: Only include time series that are related to an asset in a subtree rooted at any of these assetExternalIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Return only time series in the specified data set(s) with this id / these ids. + data_set_external_ids: Return only time series in the specified data set(s) with this external id / these external ids. + metadata: Custom, application specific metadata. String key -> String value + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + created_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + last_updated_time: Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms. + partitions: Retrieve resources in parallel using this number of workers (values up to 10 allowed), limit must be set to `None` (or `-1`). + limit: Maximum number of time series to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + advanced_filter: Advanced filter query using the filter DSL (Domain Specific Language). It allows defining complex filtering expressions that combine simple operations, such as equals, prefix, exists, etc., using boolean operators and, or, and not. See examples below for usage. + sort: The criteria to sort by. Defaults to desc for `_score_` and asc for all other properties. Sort is not allowed if `partitions` is used. Returns: - TimeSeriesList: The requested time series. + The requested time series. .. 
note:: When using `partitions`, there are few considerations to keep in mind: diff --git a/cognite/client/_sync_api/transformations/__init__.py b/cognite/client/_sync_api/transformations/__init__.py index 06f75db379..69b7e2b481 100644 --- a/cognite/client/_sync_api/transformations/__init__.py +++ b/cognite/client/_sync_api/transformations/__init__.py @@ -1,6 +1,6 @@ """ =============================================================================== -43e6460ffa80c8cd4ae83314d8f0b636 +afd8ce43f618d6a585daa09bb47a37e9 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -101,23 +101,23 @@ def __call__( Iterate over transformations Args: - chunk_size (int | None): Number of transformations to return in each chunk. Defaults to yielding one transformation a time. - include_public (bool): Whether public transformations should be included in the results. (default true). - name_regex (str | None): Regex expression to match the transformation name - query_regex (str | None): Regex expression to match the transformation query - destination_type (str | None): Transformation destination resource name to filter by. - conflict_mode (str | None): Filters by a selected transformation action type: abort/create, upsert, update, delete - cdf_project_name (str | None): Project name to filter by configured source and destination project - has_blocked_error (bool | None): Whether only the blocked transformations should be included in the results. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - data_set_ids (int | list[int] | None): Return only transformations in the specified data sets with these id(s). - data_set_external_ids (str | list[str] | None): Return only transformations in the specified data sets with these external id(s). 
- tags (TagsFilter | None): Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. - limit (int | None): Limits the number of results to be returned. Defaults to yielding all transformations. + chunk_size: Number of transformations to return in each chunk. Defaults to yielding one transformation a time. + include_public: Whether public transformations should be included in the results. (default true). + name_regex: Regex expression to match the transformation name + query_regex: Regex expression to match the transformation query + destination_type: Transformation destination resource name to filter by. + conflict_mode: Filters by a selected transformation action type: abort/create, upsert, update, delete + cdf_project_name: Project name to filter by configured source and destination project + has_blocked_error: Whether only the blocked transformations should be included in the results. + created_time: Range between two timestamps + last_updated_time: Range between two timestamps + data_set_ids: Return only transformations in the specified data sets with these id(s). + data_set_external_ids: Return only transformations in the specified data sets with these external id(s). + tags: Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. + limit: Limits the number of results to be returned. Defaults to yielding all transformations. Yields: - Transformation | TransformationList: Yields transformations in chunks if chunk_size is specified, otherwise one transformation at a time. + Yields transformations in chunks if chunk_size is specified, otherwise one transformation at a time. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.transformations( @@ -154,10 +154,10 @@ def create( `Create one or more transformations. 
`_ Args: - transformation (Transformation | TransformationWrite | Sequence[Transformation] | Sequence[TransformationWrite]): Transformation or list of transformations to create. + transformation: Transformation or list of transformations to create. Returns: - Transformation | TransformationList: Created transformation(s) + Created transformation(s) Examples: @@ -231,9 +231,9 @@ def delete( `Delete one or more transformations. `_ Args: - id (int | Sequence[int] | None): Id or list of ids. - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids. - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids. + external_id: External ID or list of external ids. + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Example: @@ -270,22 +270,22 @@ def list( `List all transformations. `_ Args: - include_public (bool): Whether public transformations should be included in the results. (default true). - name_regex (str | None): Regex expression to match the transformation name - query_regex (str | None): Regex expression to match the transformation query - destination_type (str | None): Transformation destination resource name to filter by. - conflict_mode (str | None): Filters by a selected transformation action type: abort/create, upsert, update, delete - cdf_project_name (str | None): Project name to filter by configured source and destination project - has_blocked_error (bool | None): Whether only the blocked transformations should be included in the results. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - data_set_ids (int | list[int] | None): Return only transformations in the specified data sets with these id(s). 
- data_set_external_ids (str | list[str] | None): Return only transformations in the specified data sets with these external id(s). - tags (TagsFilter | None): Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. - limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + include_public: Whether public transformations should be included in the results. (default true). + name_regex: Regex expression to match the transformation name + query_regex: Regex expression to match the transformation query + destination_type: Transformation destination resource name to filter by. + conflict_mode: Filters by a selected transformation action type: abort/create, upsert, update, delete + cdf_project_name: Project name to filter by configured source and destination project + has_blocked_error: Whether only the blocked transformations should be included in the results. + created_time: Range between two timestamps + last_updated_time: Range between two timestamps + data_set_ids: Return only transformations in the specified data sets with these id(s). + data_set_external_ids: Return only transformations in the specified data sets with these external id(s). + tags: Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. + limit: Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. Returns: - TransformationList: List of transformations + List of transformations Example: @@ -319,11 +319,11 @@ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Tra `Retrieve a single transformation by id. `_ Args: - id (int | None): ID - external_id (str | None): No description. + id: ID + external_id: No description. Returns: - Transformation | None: Requested transformation or None if it does not exist. + Requested transformation or None if it does not exist. 
Examples: @@ -350,12 +350,12 @@ def retrieve_multiple( `Retrieve multiple transformations. `_ Args: - ids (Sequence[int] | None): List of ids to retrieve. - external_ids (SequenceNotStr[str] | None): List of external ids to retrieve. - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: List of ids to retrieve. + external_ids: List of external ids to retrieve. + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - TransformationList: Requested transformation or None if it does not exist. + Requested transformations. Examples: @@ -398,11 +398,11 @@ def update( `Update one or more transformations `_ Args: - item (Transformation | TransformationWrite | TransformationUpdate | Sequence[Transformation | TransformationWrite | TransformationUpdate]): Transformation(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (Transformation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Transformation(s) to update + mode: How to update data when a non-update object is given (Transformation or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. 
Returns: - Transformation | TransformationList: Updated transformation(s) + Updated transformation(s) Examples: @@ -450,13 +450,13 @@ def run( `Run a transformation. `_ Args: - transformation_id (int | None): Transformation internal id - transformation_external_id (str | None): Transformation external id - wait (bool): Wait until the transformation run is finished. Defaults to True. - timeout (float | None): maximum time (s) to wait, default is None (infinite time). Once the timeout is reached, it returns with the current status. Won't have any effect if wait is False. + transformation_id: Transformation internal id + transformation_external_id: Transformation external id + wait: Wait until the transformation run is finished. Defaults to True. + timeout: maximum time (s) to wait, default is None (infinite time). Once the timeout is reached, it returns with the current status. Won't have any effect if wait is False. Returns: - TransformationJob: Created transformation job + Created transformation job Examples: @@ -486,8 +486,8 @@ def cancel(self, transformation_id: int | None = None, transformation_external_i `Cancel a running transformation. `_ Args: - transformation_id (int | None): Transformation internal id - transformation_external_id (str | None): Transformation external id + transformation_id: Transformation internal id + transformation_external_id: Transformation external id Examples: @@ -520,15 +520,15 @@ def preview( `Preview the result of a query. `_ Args: - query (str | None): SQL query to run for preview. - convert_to_string (bool): Stringify values in the query results, default is False. - limit (int | None): Maximum number of rows to return in the final result, default is 100. - source_limit (int | None): Maximum number of items to read from the data source or None to run without limit, default is 100. - infer_schema_limit (int | None): Limit for how many rows that are used for inferring result schema, default is 10 000. 
- timeout (int | None): Number of seconds to wait before cancelling a query. The default, and maximum, is 240. + query: SQL query to run for preview. + convert_to_string: Stringify values in the query results, default is False. + limit: Maximum number of rows to return in the final result, default is 100. + source_limit: Maximum number of items to read from the data source or None to run without limit, default is 100. + infer_schema_limit: Limit for how many rows that are used for inferring result schema, default is 10 000. + timeout: Number of seconds to wait before cancelling a query. The default, and maximum, is 240. Returns: - TransformationPreviewResult: Result of the executed query + Result of the executed query Examples: diff --git a/cognite/client/_sync_api/transformations/jobs.py b/cognite/client/_sync_api/transformations/jobs.py index 3834c825f4..9e6818593c 100644 --- a/cognite/client/_sync_api/transformations/jobs.py +++ b/cognite/client/_sync_api/transformations/jobs.py @@ -1,6 +1,6 @@ """ =============================================================================== -a80fccb60645595b2eb2e245964f25fb +1c4cb4580aff601bbc0db5a94295b802 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -36,12 +36,12 @@ def list( `List all running transformation jobs. `_ Args: - limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. - transformation_id (int | None): Filters the results by the internal transformation id. - transformation_external_id (str | None): Filters the results by the external transformation id. + limit: Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + transformation_id: Filters the results by the internal transformation id. + transformation_external_id: Filters the results by the external transformation id. 
Returns: - TransformationJobList: List of transformation jobs + List of transformation jobs Example: @@ -70,10 +70,10 @@ def retrieve(self, id: int) -> TransformationJob | None: `Retrieve a single transformation job by id. `_ Args: - id (int): Job internal Id + id: Job internal Id Returns: - TransformationJob | None: Requested transformation job or None if it does not exist. + Requested transformation job or None if it does not exist. Examples: @@ -91,10 +91,10 @@ def list_metrics(self, id: int) -> TransformationJobMetricList: `List the metrics of a single transformation job. `_ Args: - id (int): Job internal Id + id: Job internal Id Returns: - TransformationJobMetricList: List of updated metrics of the given job. + List of updated metrics of the given job. Examples: @@ -112,11 +112,11 @@ def retrieve_multiple(self, ids: Sequence[int], ignore_unknown_ids: bool = False `Retrieve multiple transformation jobs by id. `_ Args: - ids (Sequence[int]): Job internal Ids - ignore_unknown_ids (bool): Ignore IDs that are not found rather than throw an exception. + ids: Job internal Ids + ignore_unknown_ids: Ignore IDs that are not found rather than throw an exception. Returns: - TransformationJobList: Requested transformation jobs. + Requested transformation jobs. Examples: diff --git a/cognite/client/_sync_api/transformations/notifications.py b/cognite/client/_sync_api/transformations/notifications.py index 0fb7a04cbe..4e700fba80 100644 --- a/cognite/client/_sync_api/transformations/notifications.py +++ b/cognite/client/_sync_api/transformations/notifications.py @@ -1,6 +1,6 @@ """ =============================================================================== -ffb8afc8b8e4dd416046292bf4dddc81 +2fe8cbaf89649aa2c7268da746d6aa29 This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -59,14 +59,14 @@ def __call__( Iterate over transformation notifications Args: - chunk_size (int | None): Number of notifications to yield per chunk. Defaults to yielding notifications one by one. - transformation_id (int | None): Filter by transformation internal numeric ID. - transformation_external_id (str | None): Filter by transformation externalId. - destination (str | None): Filter by notification destination. - limit (int | None): Limits the number of results to be returned. Defaults to yielding all notifications. + chunk_size: Number of notifications to yield per chunk. Defaults to yielding notifications one by one. + transformation_id: Filter by transformation internal numeric ID. + transformation_external_id: Filter by transformation externalId. + destination: Filter by notification destination. + limit: Limits the number of results to be returned. Defaults to yielding all notifications. Yields: - TransformationNotification | TransformationNotificationList: Yields notifications one by one if chunk_size is None, otherwise yields lists of notifications. + Yields notifications one by one if chunk_size is None, otherwise yields lists of notifications. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.transformations.notifications( @@ -99,10 +99,10 @@ def create( `Subscribe for notifications on the transformation errors. `_ Args: - notification (TransformationNotification | TransformationNotificationWrite | Sequence[TransformationNotification] | Sequence[TransformationNotificationWrite]): Notification or list of notifications to create. + notification: Notification or list of notifications to create. Returns: - TransformationNotification | TransformationNotificationList: Created notification(s) + Created notification(s) Examples: @@ -128,13 +128,13 @@ def list( `List notification subscriptions. 
`_ Args: - transformation_id (int | None): Filter by transformation internal numeric ID. - transformation_external_id (str | None): Filter by transformation externalId. - destination (str | None): Filter by notification destination. - limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + transformation_id: Filter by transformation internal numeric ID. + transformation_external_id: Filter by transformation externalId. + destination: Filter by notification destination. + limit: Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. Returns: - TransformationNotificationList: List of transformation notifications + List of transformation notifications Example: @@ -166,7 +166,7 @@ def delete(self, id: int | Sequence[int] | None = None) -> None: `Deletes the specified notification subscriptions on the transformation. Does nothing when the subscriptions already don't exist `_ Args: - id (int | Sequence[int] | None): Id or list of transformation notification ids + id: Id or list of transformation notification ids Examples: diff --git a/cognite/client/_sync_api/transformations/schedules.py b/cognite/client/_sync_api/transformations/schedules.py index da32825281..67d237cca5 100644 --- a/cognite/client/_sync_api/transformations/schedules.py +++ b/cognite/client/_sync_api/transformations/schedules.py @@ -1,6 +1,6 @@ """ =============================================================================== -465f322855aba108f265e3ba8b07c9aa +2969b5e4e199920556bc7a94d824fb81 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -49,12 +49,12 @@ def __call__( Iterate over transformation schedules Args: - chunk_size (int | None): The number of schedules to return in each chunk. Defaults to yielding one schedule a time. 
- include_public (bool): Whether public transformations should be included in the results. (default true). - limit (int | None): Limits the number of results to be returned. Defaults to yielding all schedules. + chunk_size: The number of schedules to return in each chunk. Defaults to yielding one schedule a time. + include_public: Whether public transformations should be included in the results. (default true). + limit: Limits the number of results to be returned. Defaults to yielding all schedules. Yields: - TransformationSchedule | TransformationScheduleList: Yields schedules one by one if chunk_size is None, otherwise yields lists of schedules. + Yields schedules one by one if chunk_size is None, otherwise yields lists of schedules. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.transformations.schedules( @@ -81,10 +81,10 @@ def create( `Schedule the specified transformation with the specified configuration(s). `_ Args: - schedule (TransformationSchedule | TransformationScheduleWrite | Sequence[TransformationSchedule] | Sequence[TransformationScheduleWrite]): Configuration or list of configurations of the schedules to create. + schedule: Configuration or list of configurations of the schedules to create. Returns: - TransformationSchedule | TransformationScheduleList: Created schedule(s) + Created schedule(s) Examples: @@ -104,11 +104,11 @@ def retrieve(self, id: int | None = None, external_id: str | None = None) -> Tra `Retrieve a single transformation schedule by the id or external id of its transformation. `_ Args: - id (int | None): transformation ID - external_id (str | None): transformation External ID + id: transformation ID + external_id: transformation External ID Returns: - TransformationSchedule | None: Requested transformation schedule or None if it does not exist. + Requested transformation schedule or None if it does not exist. 
Examples: @@ -135,12 +135,12 @@ def retrieve_multiple( `Retrieve multiple transformation schedules by the ids or external ids of the corresponding transformations. `_ Args: - ids (Sequence[int] | None): transformation IDs - external_ids (SequenceNotStr[str] | None): transformation External IDs - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + ids: transformation IDs + external_ids: transformation External IDs + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. Returns: - TransformationScheduleList: Requested transformation schedules. + Requested transformation schedules. Examples: @@ -166,11 +166,11 @@ def list(self, include_public: bool = True, limit: int | None = DEFAULT_LIMIT_RE `List all transformation schedules. `_ Args: - include_public (bool): Whether public transformations should be included in the results. (default true). - limit (int | None): Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. + include_public: Whether public transformations should be included in the results. (default true). + limit: Limits the number of results to be returned. To retrieve all results use limit=-1, default limit is 25. Returns: - TransformationScheduleList: List of schedules + List of schedules Example: @@ -193,9 +193,9 @@ def delete( `Unschedule one or more transformations `_ Args: - id (int | Sequence[int] | None): Id or list of ids - external_id (str | SequenceNotStr[str] | None): External ID or list of external ids - ignore_unknown_ids (bool): Ignore IDs and external IDs that are not found rather than throw an exception. + id: Id or list of ids + external_id: External ID or list of external ids + ignore_unknown_ids: Ignore IDs and external IDs that are not found rather than throw an exception. 
Examples: @@ -238,11 +238,11 @@ def update( `Update one or more transformation schedules `_ Args: - item (TransformationSchedule | TransformationScheduleWrite | TransformationScheduleUpdate | Sequence[TransformationSchedule | TransformationScheduleWrite | TransformationScheduleUpdate]): Transformation schedule(s) to update - mode (Literal['replace_ignore_null', 'patch', 'replace']): How to update data when a non-update object is given (TransformationSchedule or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. + item: Transformation schedule(s) to update + mode: How to update data when a non-update object is given (TransformationSchedule or -Write). If you use 'replace_ignore_null', only the fields you have set will be used to replace existing (default). Using 'replace' will additionally clear all the fields that are not specified by you. Last option, 'patch', will update only the fields you have set and for container-like fields such as metadata or labels, add the values to the existing. For more details, see :ref:`appendix-update`. 
Returns: - TransformationSchedule | TransformationScheduleList: Updated transformation schedule(s) + Updated transformation schedule(s) Examples: diff --git a/cognite/client/_sync_api/transformations/schema.py b/cognite/client/_sync_api/transformations/schema.py index c654b6fd78..7b48facf24 100644 --- a/cognite/client/_sync_api/transformations/schema.py +++ b/cognite/client/_sync_api/transformations/schema.py @@ -1,6 +1,6 @@ """ =============================================================================== -831938d78c499d094223f8b4531026b2 +03b42121d2e346189b87c6ef3856e495 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -29,11 +29,11 @@ def retrieve( `Get expected schema for a transformation destination. `_ Args: - destination (TransformationDestination): destination for which the schema is requested. - conflict_mode (str | None): conflict mode for which the schema is requested. + destination: destination for which the schema is requested. + conflict_mode: conflict mode for which the schema is requested. Returns: - TransformationSchemaColumnList: List of column descriptions + List of column descriptions Example: diff --git a/cognite/client/_sync_api/unit_system.py b/cognite/client/_sync_api/unit_system.py index 60a9df7a1d..18e9dd8873 100644 --- a/cognite/client/_sync_api/unit_system.py +++ b/cognite/client/_sync_api/unit_system.py @@ -1,6 +1,6 @@ """ =============================================================================== -ebf94ce24437dd81bab208403099ec24 +a3e57d6b670ac6d420094220bc5c790f This file is auto-generated from the Async API modules, - do not edit manually! 
=============================================================================== """ @@ -24,7 +24,7 @@ def list(self) -> UnitSystemList: `List all supported unit systems `_ Returns: - UnitSystemList: List of unit systems + List of unit systems Examples: diff --git a/cognite/client/_sync_api/units.py b/cognite/client/_sync_api/units.py index e7f7461eec..7bf7eeab2c 100644 --- a/cognite/client/_sync_api/units.py +++ b/cognite/client/_sync_api/units.py @@ -1,6 +1,6 @@ """ =============================================================================== -341c216ca7805041eb8b81051219edd4 +19cbba7adda14b61f8452f8be25f9002 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -40,11 +40,11 @@ def retrieve( `Retrieve one or more unit `_ Args: - external_id (str | SequenceNotStr[str]): External ID or list of external IDs - ignore_unknown_ids (bool): Ignore external IDs that are not found rather than throw an exception. + external_id: External ID or list of external IDs + ignore_unknown_ids: Ignore external IDs that are not found rather than throw an exception. Returns: - Unit | UnitList | None: If a single external ID is specified: the requested unit, or None if it does not exist. If several external IDs are specified: the requested units. + If a single external ID is specified: the requested unit, or None if it does not exist. If several external IDs are specified: the requested units. Examples: @@ -104,13 +104,13 @@ def from_alias( may not be any close matches, in which case an empty UnitList is returned. Args: - alias (str): Alias of the unit, like 'cmol / L' or 'meter per second'. - quantity (str | None): Quantity of the unit, like 'Temperature' or 'Pressure'. - return_ambiguous (bool): If False (default), when the alias is ambiguous (i.e. no quantity was given), raise a ValueError. If True, return the list of all matching units. - return_closest_matches (bool): If False (default), when the lookup fails, raise a ValueError (default).
If True, return the closest matching units (even if empty). + alias: Alias of the unit, like 'cmol / L' or 'meter per second'. + quantity: Quantity of the unit, like 'Temperature' or 'Pressure'. + return_ambiguous: If False (default), when the alias is ambiguous (i.e. no quantity was given), raise a ValueError. If True, return the list of all matching units. + return_closest_matches: If False (default), when the lookup fails, raise a ValueError (default). If True, return the closest matching units (even if empty). Returns: - Unit | UnitList: The unit if found, else a ValueError is raised. If one or both of ``return_ambiguous`` and ``return_closest_matches`` is passed as True, a UnitList may be returned. + The unit if found, else a ValueError is raised. If one or both of ``return_ambiguous`` and ``return_closest_matches`` is passed as True, a UnitList may be returned. Examples: @@ -143,7 +143,7 @@ def list(self) -> UnitList: `List all supported units `_ Returns: - UnitList: List of units + List of units Examples: diff --git a/cognite/client/_sync_api/user_profiles.py b/cognite/client/_sync_api/user_profiles.py index 7f206a9801..51c6d083ba 100644 --- a/cognite/client/_sync_api/user_profiles.py +++ b/cognite/client/_sync_api/user_profiles.py @@ -1,6 +1,6 @@ """ =============================================================================== -ba4cb3e6d29f781dd2dfa201dbaa068a +54c46a8f60ce09d647ad785a4c5b440d This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -42,7 +42,7 @@ def me(self) -> UserProfile: Retrieves the user profile of the principal issuing the request, i.e. the principal *this* AsyncCogniteClient was instantiated with. Returns: - UserProfile: Your own user profile. + Your own user profile. Raises: CogniteAPIError: If this principal doesn't have a user profile, you get a not found (404) response code. 
@@ -71,10 +71,10 @@ def retrieve(self, user_identifier: str | SequenceNotStr[str]) -> UserProfile | Retrieves one or more user profiles indexed by the user identifier in the same CDF project. Args: - user_identifier (str | SequenceNotStr[str]): The single user identifier (or sequence of) to retrieve profile(s) for. + user_identifier: The single user identifier (or sequence of) to retrieve profile(s) for. Returns: - UserProfile | UserProfileList | None: UserProfileList if a sequence of user identifier were requested, else UserProfile. If a single user identifier is requested and it is not found, None is returned. + UserProfileList if a sequence of user identifier were requested, else UserProfile. If a single user identifier is requested and it is not found, None is returned. Raises: CogniteNotFoundError: A sequences of user identifiers were requested, but one or more does not exist. @@ -100,11 +100,11 @@ def search(self, name: str, limit: int = DEFAULT_LIMIT_READ) -> UserProfileList: Primarily meant for human-centric use-cases and data exploration, not for programs, as the result set ordering and match criteria threshold may change over time. Args: - name (str): Prefix search on name. - limit (int): Maximum number of results to return. + name: Prefix search on name. + limit: Maximum number of results to return. Returns: - UserProfileList: User profiles search result + User profiles search result Examples: @@ -124,10 +124,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> UserProfileList: List all user profiles in the current CDF project. The results are ordered alphabetically by name. Args: - limit (int | None): Maximum number of user profiles to return. Defaults to 25. Set to -1, float("inf") or None to return all. + limit: Maximum number of user profiles to return. Defaults to 25. Set to -1, float("inf") or None to return all. Returns: - UserProfileList: List of user profiles. + List of user profiles. 
Examples: diff --git a/cognite/client/_sync_api/vision.py b/cognite/client/_sync_api/vision.py index 96c0e89ef1..58b9b8e32a 100644 --- a/cognite/client/_sync_api/vision.py +++ b/cognite/client/_sync_api/vision.py @@ -1,6 +1,6 @@ """ =============================================================================== -275f2ffb4ce9fc0310c3dceba69c4c53 +4dcb782b79fca8ab86d57e9d1e5180b9 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -30,12 +30,12 @@ def extract( `Start an asynchronous job to extract features from image files. `_ Args: - features (VisionFeature | list[VisionFeature]): The feature(s) to extract from the provided image files. - file_ids (list[int] | None): IDs of the image files to analyze. The images must already be uploaded in the same CDF project. - file_external_ids (list[str] | None): The external file ids of the image files to analyze. - parameters (FeatureParameters | None): No description. + features: The feature(s) to extract from the provided image files. + file_ids: IDs of the image files to analyze. The images must already be uploaded in the same CDF project. + file_external_ids: The external file ids of the image files to analyze. + parameters: No description. Returns: - VisionExtractJob: Resulting queued job, which can be used to retrieve the status of the job or the prediction results if the job is finished. Note that .result property of this job will wait for the job to finish and returns the results. + Resulting queued job, which can be used to retrieve the status of the job or the prediction results if the job is finished. Note that .result property of this job will wait for the job to finish and returns the results. Examples: Start a job, wait for completion and then get the parsed results: @@ -63,10 +63,10 @@ def get_extract_job(self, job_id: int) -> VisionExtractJob: `Retrieve an existing extract job by ID. 
`_ Args: - job_id (int): ID of an existing feature extraction job. + job_id: ID of an existing feature extraction job. Returns: - VisionExtractJob: Vision extract job, which can be used to retrieve the status of the job or the prediction results if the job is finished. Note that .result property of this job will wait for the job to finish and returns the results. + Vision extract job, which can be used to retrieve the status of the job or the prediction results if the job is finished. Note that .result property of this job will wait for the job to finish and returns the results. Examples: Retrieve a vision extract job by ID: diff --git a/cognite/client/_sync_api/workflows/__init__.py b/cognite/client/_sync_api/workflows/__init__.py index 0905372914..69b157d98d 100644 --- a/cognite/client/_sync_api/workflows/__init__.py +++ b/cognite/client/_sync_api/workflows/__init__.py @@ -1,6 +1,6 @@ """ =============================================================================== -29f6ff693748fa677ae904db8092fb93 +01007117ee0b66c3adc02434b19e6cfe This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -23,7 +23,6 @@ if TYPE_CHECKING: from cognite.client import AsyncCogniteClient - from cognite.client import ClientConfig as ClientConfig class SyncWorkflowAPI(SyncAPIClient): @@ -49,11 +48,11 @@ def __call__( Iterate over workflows Args: - chunk_size (int | None): The number of workflows to return in each chunk. Defaults to yielding one workflow at a time. - limit (int | None): Maximum number of workflows to return. Defaults to returning all items. + chunk_size: The number of workflows to return in each chunk. Defaults to yielding one workflow at a time. + limit: Maximum number of workflows to return. Defaults to returning all items. Yields: - Workflow | WorkflowList: Yields Workflow one by one if chunk_size is None, otherwise yields WorkflowList objects. 
+ Yields Workflow one by one if chunk_size is None, otherwise yields WorkflowList objects. """ # noqa: DOC404 yield from SyncIterator(self.__async_client.workflows(chunk_size=chunk_size, limit=limit)) # type: ignore [misc] @@ -72,11 +71,11 @@ def upsert( Note this is an upsert endpoint, so workflows that already exist will be updated, and new ones will be created. Args: - workflow (WorkflowUpsert | Sequence[WorkflowUpsert]): The workflow(s) to upsert. - mode (Literal['replace']): This is not an option for the API, but is included here to document that the upserts are always done in replace mode. + workflow: The workflow(s) to upsert. + mode: This is not an option for the API, but is included here to document that the upserts are always done in replace mode. Returns: - Workflow | WorkflowList: The created workflow(s). + The created workflow(s). Examples: @@ -109,11 +108,11 @@ def retrieve( `Retrieve one or more workflows. `_ Args: - external_id (str | SequenceNotStr[str]): Identifier (or sequence of identifiers) for a Workflow. Must be unique. - ignore_unknown_ids (bool): When requesting multiple workflows, whether to ignore external IDs that are not found rather than throwing an exception. + external_id: Identifier (or sequence of identifiers) for a Workflow. Must be unique. + ignore_unknown_ids: When requesting multiple workflows, whether to ignore external IDs that are not found rather than throwing an exception. Returns: - Workflow | WorkflowList | None: If a single external ID is specified: the requested workflow, or None if it does not exist. If several external IDs are specified: the requested workflows. + If a single external ID is specified: the requested workflow, or None if it does not exist. If several external IDs are specified: the requested workflows. Examples: @@ -137,8 +136,8 @@ def delete(self, external_id: str | SequenceNotStr[str], ignore_unknown_ids: boo `Delete one or more workflows with versions. `_ Args: - external_id (str | SequenceNotStr[str]): External id or list of external ids to delete.
- ignore_unknown_ids (bool): Ignore external ids that are not found rather than throw an exception. + external_id: External id or list of external ids to delete. + ignore_unknown_ids: Ignore external ids that are not found rather than throw an exception. Examples: @@ -158,10 +157,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> WorkflowList: `List workflows in the project. `_ Args: - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None Returns: - WorkflowList: Workflows in the CDF project. + Workflows in the CDF project. Examples: diff --git a/cognite/client/_sync_api/workflows/executions.py b/cognite/client/_sync_api/workflows/executions.py index 48d6a5670e..c0c38353b8 100644 --- a/cognite/client/_sync_api/workflows/executions.py +++ b/cognite/client/_sync_api/workflows/executions.py @@ -1,6 +1,6 @@ """ =============================================================================== -8b4f255ab67b6c0158613854048ba3d2 +bfd87e4b37d38491bed90f82798e23ea This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -38,10 +38,10 @@ def retrieve_detailed(self, id: str) -> WorkflowExecutionDetailed | None: `Retrieve a workflow execution with detailed information. `_ Args: - id (str): The server-generated id of the workflow execution. + id: The server-generated id of the workflow execution. Returns: - WorkflowExecutionDetailed | None: The requested workflow execution if it exists, None otherwise. + The requested workflow execution if it exists, None otherwise. Examples: @@ -72,12 +72,12 @@ def run( `Run a workflow execution. `_ Args: - workflow_external_id (str): External id of the workflow. - version (str): Version of the workflow. - input (dict | None): The input to the workflow execution. 
This will be available for tasks that have specified it as an input with the string "${workflow.input}" See tip below for more information. - metadata (dict | None): Application specific metadata. Keys have a maximum length of 32 characters, values a maximum of 255, and there can be a maximum of 10 key-value pairs. - client_credentials (ClientCredentials | None): Specific credentials that should be used to trigger the workflow execution. When passed will take precedence over the current credentials. - nonce (str | None): The nonce to use to bind the session. If not provided, a new session will be created using the given 'client_credentials'. If this is not given, the current credentials will be used. + workflow_external_id: External id of the workflow. + version: Version of the workflow. + input: The input to the workflow execution. This will be available for tasks that have specified it as an input with the string "${workflow.input}" See tip below for more information. + metadata: Application specific metadata. Keys have a maximum length of 32 characters, values a maximum of 255, and there can be a maximum of 10 key-value pairs. + client_credentials: Specific credentials that should be used to trigger the workflow execution. When passed will take precedence over the current credentials. + nonce: The nonce to use to bind the session. If not provided, a new session will be created using the given 'client_credentials'. If this is not given, the current credentials will be used. Tip: The workflow input can be available in the workflow tasks. For example, if you have a Task with @@ -94,7 +94,7 @@ def run( You can create a session via the Sessions API, using the client.iam.session.create() method. Returns: - WorkflowExecution: The created workflow execution. + The created workflow execution. Examples: @@ -139,14 +139,14 @@ def list( `List workflow executions in the project. 
`_ Args: - workflow_version_ids (WorkflowVersionIdentifier | MutableSequence[WorkflowVersionIdentifier] | None): Workflow version id or list of workflow version ids to filter on. - created_time_start (int | None): Filter out executions that was created before this time. Time is in milliseconds since epoch. - created_time_end (int | None): Filter out executions that was created after this time. Time is in milliseconds since epoch. - statuses (WorkflowStatus | MutableSequence[WorkflowStatus] | None): Workflow status or list of workflow statuses to filter on. - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + workflow_version_ids: Workflow version id or list of workflow version ids to filter on. + created_time_start: Filter out executions that was created before this time. Time is in milliseconds since epoch. + created_time_end: Filter out executions that was created after this time. Time is in milliseconds since epoch. + statuses: Workflow status or list of workflow statuses to filter on. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - WorkflowExecutionList: The requested workflow executions. + The requested workflow executions. Examples: @@ -182,12 +182,11 @@ def cancel(self, id: str, reason: str | None) -> WorkflowExecution: other services (like transformations and functions). Args: - id (str): The server-generated id of the workflow execution. - reason (str | None): The reason for the cancellation, this will be put within the execution's `reasonForIncompletion` field. It is defaulted to 'cancelled' if not provided. - + id: The server-generated id of the workflow execution. + reason: The reason for the cancellation, this will be put within the execution's `reasonForIncompletion` field. It is defaulted to 'cancelled' if not provided. Returns: - WorkflowExecution: The canceled workflow execution. 
+ The canceled workflow execution. Examples: @@ -206,11 +205,11 @@ def retry(self, id: str, client_credentials: ClientCredentials | None = None) -> `Retry a workflow execution. `_ Args: - id (str): The server-generated id of the workflow execution. - client_credentials (ClientCredentials | None): Specific credentials that should be used to retry the workflow execution. When passed will take precedence over the current credentials. + id: The server-generated id of the workflow execution. + client_credentials: Specific credentials that should be used to retry the workflow execution. When passed will take precedence over the current credentials. Returns: - WorkflowExecution: The retried workflow execution. + The retried workflow execution. Examples: Retry a workflow execution that has been cancelled or failed: diff --git a/cognite/client/_sync_api/workflows/tasks.py b/cognite/client/_sync_api/workflows/tasks.py index fb5447c817..1176584ed3 100644 --- a/cognite/client/_sync_api/workflows/tasks.py +++ b/cognite/client/_sync_api/workflows/tasks.py @@ -1,6 +1,6 @@ """ =============================================================================== -00f698c970d43d29fc5df777446fe4f3 +b10fdffcf288bd295bdd2a52edd9fadc This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -33,12 +33,12 @@ def update( For tasks that has been marked with 'is_async = True', the status must be updated by calling this endpoint with either 'completed', 'failed' or 'failed_with_terminal_error'. Args: - task_id (str): The server-generated id of the task. - status (Literal['completed', 'failed', 'failed_with_terminal_error']): The new status of the task. Must be either 'completed', 'failed' or 'failed_with_terminal_error'. - output (dict | None): The output of the task. 
This will be available for tasks that has specified it as an output with the string "${.output}" + task_id: The server-generated id of the task. + status: The new status of the task. Must be either 'completed', 'failed' or 'failed_with_terminal_error'. + output: The output of the task. This will be available for tasks that has specified it as an output with the string "${.output}" Returns: - WorkflowTaskExecution: The updated task execution. + The updated task execution. Examples: diff --git a/cognite/client/_sync_api/workflows/triggers.py b/cognite/client/_sync_api/workflows/triggers.py index fe7348155e..16b8517e48 100644 --- a/cognite/client/_sync_api/workflows/triggers.py +++ b/cognite/client/_sync_api/workflows/triggers.py @@ -1,6 +1,6 @@ """ =============================================================================== -b379199cc5dd6e5f8027cc521220e0d6 +726fd1c5f651769c35b0572f58c41192 This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -39,11 +39,11 @@ def upsert( `Create or update a trigger for a workflow. `_ Args: - workflow_trigger (WorkflowTriggerUpsert): The workflow trigger specification. - client_credentials (ClientCredentials | dict | None): Specific credentials that should be used to trigger the workflow execution. When passed will take precedence over the current credentials. + workflow_trigger: The workflow trigger specification. + client_credentials: Specific credentials that should be used to trigger the workflow execution. When passed will take precedence over the current credentials. Returns: - WorkflowTrigger: The created or updated workflow trigger specification. + The created or updated workflow trigger specification. Examples: @@ -99,7 +99,7 @@ def delete(self, external_id: str | SequenceNotStr[str]) -> None: `Delete one or more triggers for a workflow. 
`_ Args: - external_id (str | SequenceNotStr[str]): The external id(s) of the trigger(s) to delete. + external_id: The external id(s) of the trigger(s) to delete. Examples: @@ -121,10 +121,10 @@ def list(self, limit: int | None = DEFAULT_LIMIT_READ) -> WorkflowTriggerList: `List the workflow triggers. `_ Args: - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - WorkflowTriggerList: The list of triggers. + The list of triggers. Examples: @@ -142,11 +142,11 @@ def list_runs(self, external_id: str, limit: int | None = DEFAULT_LIMIT_READ) -> `List the history of runs for a trigger. `_ Args: - external_id (str): The external id of the trigger to list runs for. - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. + external_id: The external id of the trigger to list runs for. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None to return all items. Returns: - WorkflowTriggerRunList: The requested trigger runs. + The requested trigger runs. Examples: @@ -167,7 +167,7 @@ def pause(self, external_id: str) -> None: This operation is idempotent - pausing an already paused trigger has no effect. Args: - external_id (str): The external id of the trigger to pause. + external_id: The external id of the trigger to pause. Examples: @@ -188,7 +188,7 @@ def resume(self, external_id: str) -> None: This operation is idempotent - resuming an already active trigger has no effect. Args: - external_id (str): The external id of the trigger to resume. + external_id: The external id of the trigger to resume. 
Examples: diff --git a/cognite/client/_sync_api/workflows/versions.py b/cognite/client/_sync_api/workflows/versions.py index 3085b8b049..496b82168f 100644 --- a/cognite/client/_sync_api/workflows/versions.py +++ b/cognite/client/_sync_api/workflows/versions.py @@ -1,6 +1,6 @@ """ =============================================================================== -c89364f15d0ee4178e12fb002ea38399 +7bdbaaa465ad01ded467630eb5232abd This file is auto-generated from the Async API modules, - do not edit manually! =============================================================================== """ @@ -59,12 +59,12 @@ def __call__( Iterate over workflow versions Args: - chunk_size (int | None): The number of workflow versions to return in each chunk. Defaults to yielding one workflow version at a time. - workflow_version_ids (WorkflowIdentifier | MutableSequence[WorkflowIdentifier] | None): Workflow version id or list of workflow version ids to filter on. - limit (int | None): Maximum number of workflow versions to return. Defaults to returning all. + chunk_size: The number of workflow versions to return in each chunk. Defaults to yielding one workflow version at a time. + workflow_version_ids: Workflow version id or list of workflow version ids to filter on. + limit: Maximum number of workflow versions to return. Defaults to returning all. Yields: - WorkflowVersion | WorkflowVersionList: Yields WorkflowVersion one by one if chunk_size is None, otherwise yields WorkflowVersionList objects. + Yields WorkflowVersion one by one if chunk_size is None, otherwise yields WorkflowVersionList objects. """ # noqa: DOC404 yield from SyncIterator( self.__async_client.workflows.versions( @@ -87,11 +87,11 @@ def upsert( Note this is an upsert endpoint, so workflow versions that already exist will be updated, and new ones will be created. Args: - version (WorkflowVersionUpsert | Sequence[WorkflowVersionUpsert]): The workflow version(s) to upsert. 
- mode (Literal['replace']): This is not an option for the API, but is included here to document that the upserts are always done in replace mode. + version: The workflow version(s) to upsert. + mode: This is not an option for the API, but is included here to document that the upserts are always done in replace mode. Returns: - WorkflowVersion | WorkflowVersionList: The created workflow version(s). + The created workflow version(s). Examples: @@ -136,8 +136,8 @@ def delete( `Delete a workflow version(s). `_ Args: - workflow_version_id (WorkflowVersionIdentifier | MutableSequence[WorkflowVersionId] | MutableSequence[tuple[str, str]]): Workflow version id or list of workflow version ids to delete. - ignore_unknown_ids (bool): Ignore external ids that are not found rather than throw an exception. + workflow_version_id: Workflow version id or list of workflow version ids to delete. + ignore_unknown_ids: Ignore external ids that are not found rather than throw an exception. Examples: @@ -177,11 +177,11 @@ def retrieve( `Retrieve a workflow version. `_ Args: - workflow_external_id (WorkflowVersionIdentifier | Sequence[WorkflowVersionIdentifier] | WorkflowIds): External id of the workflow. - ignore_unknown_ids (bool): When requesting multiple, whether to ignore external IDs that are not found rather than throwing an exception. + workflow_external_id: External id of the workflow. + ignore_unknown_ids: When requesting multiple, whether to ignore external IDs that are not found rather than throwing an exception. Returns: - WorkflowVersion | WorkflowVersionList | None: If a single identifier is specified: the requested workflow version, or None if it does not exist. If several ids are specified: the requested workflow versions. + If a single identifier is specified: the requested workflow version, or None if it does not exist. If several ids are specified: the requested workflow versions.
Examples: @@ -217,11 +217,11 @@ def list( `List workflow versions in the project `_ Args: - workflow_version_ids (WorkflowIdentifier | MutableSequence[WorkflowIdentifier] | None): Workflow version id or list of workflow version ids to filter on. - limit (int | None): Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None + workflow_version_ids: Workflow version id or list of workflow version ids to filter on. + limit: Maximum number of results to return. Defaults to 25. Set to -1, float("inf") or None Returns: - WorkflowVersionList: The requested workflow versions. + The requested workflow versions. Examples: diff --git a/cognite/client/_sync_cognite_client.py b/cognite/client/_sync_cognite_client.py index 9089078c4a..bfe192112a 100644 --- a/cognite/client/_sync_cognite_client.py +++ b/cognite/client/_sync_cognite_client.py @@ -54,7 +54,7 @@ class CogniteClient: For the asynchronous client, see :class:`~cognite.client._cognite_client.AsyncCogniteClient`. Args: - config (ClientConfig | None): The configuration for this client. + config: The configuration for this client. """ def __init__(self, config: ClientConfig | None = None) -> None: @@ -122,7 +122,7 @@ def version(self) -> str: """Returns the current SDK version. Returns: - str: The current SDK version + The current SDK version """ from cognite.client import __version__ @@ -133,7 +133,7 @@ def config(self) -> ClientConfig: """Returns a config object containing the configuration for the current client. Returns: - ClientConfig: The configuration object. + The configuration object. """ return self.__async_client._config @@ -153,13 +153,13 @@ def default( * Base URL: "https://{cdf_cluster}.cognitedata.com/ Args: - project (str): The CDF project. - cdf_cluster (str): The CDF cluster where the CDF project is located. - credentials (CredentialProvider): Credentials. e.g. Token, ClientCredentials. - client_name (str | None): A user-defined name for the client. 
Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + project: The CDF project. + cdf_cluster: The CDF cluster where the CDF project is located. + credentials: Credentials. e.g. Token, ClientCredentials. + client_name: A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. Returns: - CogniteClient: An CogniteClient instance with default configurations. + A CogniteClient instance with default configurations. """ from cognite.client import ClientConfig @@ -185,15 +185,15 @@ def default_oauth_client_credentials( * Scopes: [f"https://{cdf_cluster}.cognitedata.com/.default"] Args: - project (str): The CDF project. - cdf_cluster (str): The CDF cluster where the CDF project is located. - tenant_id (str): The Azure tenant ID. - client_id (str): The Azure client ID. - client_secret (str): The Azure client secret. - client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + project: The CDF project. + cdf_cluster: The CDF cluster where the CDF project is located. + tenant_id: The Azure tenant ID. + client_id: The Azure client ID. + client_secret: The Azure client secret. + client_name: A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. Returns: - CogniteClient: An CogniteClient instance with default configurations. + A CogniteClient instance with default configurations.
""" credentials = OAuthClientCredentials.default_for_entra_id(tenant_id, client_id, client_secret, cdf_cluster) return cls.default(project, cdf_cluster, credentials, client_name) @@ -217,14 +217,14 @@ def default_oauth_interactive( * Scopes: [f"https://{cdf_cluster}.cognitedata.com/.default"] Args: - project (str): The CDF project. - cdf_cluster (str): The CDF cluster where the CDF project is located. - tenant_id (str): The Azure tenant ID. - client_id (str): The Azure client ID. - client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + project: The CDF project. + cdf_cluster: The CDF cluster where the CDF project is located. + tenant_id: The Azure tenant ID. + client_id: The Azure client ID. + client_name: A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. Returns: - CogniteClient: An CogniteClient instance with default configurations. + An CogniteClient instance with default configurations. """ credentials = OAuthInteractive.default_for_entra_id(tenant_id, client_id, cdf_cluster) return cls.default(project, cdf_cluster, credentials, client_name) @@ -234,10 +234,10 @@ def load(cls, config: dict[str, Any] | str) -> CogniteClient: """Load a cognite client object from a YAML/JSON string or dict. Args: - config (dict[str, Any] | str): A dictionary or YAML/JSON string containing configuration values defined in the CogniteClient class. + config: A dictionary or YAML/JSON string containing configuration values defined in the CogniteClient class. Returns: - CogniteClient: A cognite client object. + A cognite client object. 
Examples: diff --git a/cognite/client/config.py b/cognite/client/config.py index 37df6dc38b..734079c63f 100644 --- a/cognite/client/config.py +++ b/cognite/client/config.py @@ -18,36 +18,36 @@ class GlobalConfig: """Global configuration object Attributes: - default_client_config (Optional[ClientConfig]): A default instance of a client configuration. This will be used + default_client_config: A default instance of a client configuration. This will be used by the AsyncCogniteClient or CogniteClient constructor if no config is passed directly. Defaults to None. - disable_gzip (bool): Whether or not to disable gzipping of json bodies. Defaults to False. - disable_pypi_version_check (bool): Whether or not to check for newer SDK versions when instantiating a new client. + disable_gzip: Whether or not to disable gzipping of json bodies. Defaults to False. + disable_pypi_version_check: Whether or not to check for newer SDK versions when instantiating a new client. Defaults to False. - status_forcelist (Set[int]): HTTP status codes to retry. Defaults to {429, 502, 503, 504} - max_retries (int): Max number of retries on a given http request. Defaults to 10. - max_retries_connect (int): Max number of retries on connection errors. Defaults to 3. - max_retry_backoff (int): Retry strategy employs exponential backoff. This parameter sets a max on the amount of + status_forcelist: HTTP status codes to retry. Defaults to {429, 502, 503, 504} + max_retries: Max number of retries on a given http request. Defaults to 10. + max_retries_connect: Max number of retries on connection errors. Defaults to 3. + max_retry_backoff: Retry strategy employs exponential backoff. This parameter sets a max on the amount of backoff after any request failure. Defaults to 60. - max_connection_pool_size (int): The maximum number of connections which will be kept in the SDKs connection pool. + max_connection_pool_size: The maximum number of connections which will be kept in the SDKs connection pool. 
Defaults to 20. - disable_ssl (bool): Whether or not to disable SSL. Defaults to False - proxy (str | None): Route all traffic (HTTP and HTTPS) via this proxy, e.g. "http://localhost:8030". + disable_ssl: Whether or not to disable SSL. Defaults to False + proxy: Route all traffic (HTTP and HTTPS) via this proxy, e.g. "http://localhost:8030". For proxy authentication, embed credentials in the URL: "http://user:pass@localhost:8030". Defaults to None (no proxy). - max_workers (int): DEPRECATED: Use 'concurrency_settings' instead. Maximum number of concurrent API calls. Defaults to 5. - concurrency_settings (ConcurrencySettings): Settings controlling the maximum number of concurrent API requests + max_workers: DEPRECATED: Use 'concurrency_settings' instead. Maximum number of concurrent API calls. Defaults to 5. + concurrency_settings: Settings controlling the maximum number of concurrent API requests for different API categories (general, raw, data_modeling etc.). These settings are frozen after the first API request is made. See https://cognite-sdk-python.readthedocs-hosted.com/en/latest/settings.html#concurrency-settings - follow_redirects (bool): Whether or not to follow redirects. Defaults to False. - file_download_chunk_size (int | None): Specify the file chunk size for streaming file downloads. When not specified + follow_redirects: Whether or not to follow redirects. Defaults to False. + file_download_chunk_size: Specify the file chunk size for streaming file downloads. When not specified (default is None), the actual chunk size is determined by the underlying transport, which in turn is based on the size of the data packets being read from the network socket. The chunks will be of a variable and unpredictable size, but optimized for network efficiency (best download speed). - file_upload_chunk_size (int | None): Override the chunk size for streaming file uploads. 
Defaults to None, which + file_upload_chunk_size: Override the chunk size for streaming file uploads. Defaults to None, which translates to 65536 (64KiB chunks). - silence_feature_preview_warnings (bool): Whether or not to silence warnings triggered by using alpha or beta + silence_feature_preview_warnings: Whether or not to silence warnings triggered by using alpha or beta features. Defaults to False. - event_loop (asyncio.AbstractEventLoop | None): Override the default event loop used by the SDK. + event_loop: Override the default event loop used by the SDK. """ def __new__(cls) -> GlobalConfig: @@ -115,7 +115,7 @@ def apply_settings(self, settings: dict[str, Any] | str) -> None: This must be done before instantiating an AsyncCogniteClient for the configuration to take effect. Args: - settings (dict[str, Any] | str): A dictionary or YAML/JSON string containing configuration values defined in the GlobalConfig class. + settings: A dictionary or YAML/JSON string containing configuration values defined in the GlobalConfig class. Examples: @@ -158,19 +158,16 @@ class ClientConfig: """Configuration object for the client Args: - client_name (str): A user-defined name for the client. Used to identify number of unique applications/scripts running on top of CDF. - project (str): CDF Project name. - credentials (CredentialProvider): Credentials. e.g. Token, ClientCredentials. - api_subversion (str | None): API subversion - base_url (str | None): Base url to send requests to. Typically on the form 'https://.cognitedata.com'. - Either base_url or cluster must be provided. - cluster (str | None): The cluster where the CDF project is located. When passed, it is assumed that the base - URL can be constructed as: 'https://.cognitedata.com'. Either base_url or cluster must be provided. - headers (dict[str, str] | None): Additional headers to add to all requests. - timeout (int | None): Timeout on requests sent to the api. Defaults to 60 seconds. 
- file_transfer_timeout (int | None): Timeout on file upload/download requests. Defaults to 600 seconds. - debug (bool): Enables debug logging to stderr. This includes full request/response details and logs regarding retry - attempts (e.g., on 429 throttling or 5xx errors). + client_name: A user-defined name for the client. Used to identify number of unique applications/scripts running on top of CDF. + project: CDF Project name. + credentials: Credentials. e.g. Token, ClientCredentials. + api_subversion: API subversion + base_url: Base url to send requests to. Typically on the form 'https://.cognitedata.com'. Either base_url or cluster must be provided. + cluster: The cluster where the CDF project is located. When passed, it is assumed that the base URL can be constructed as: 'https://.cognitedata.com'. Either base_url or cluster must be provided. + headers: Additional headers to add to all requests. + timeout: Timeout on requests sent to the api. Defaults to 60 seconds. + file_transfer_timeout: Timeout on file upload/download requests. Defaults to 600 seconds. + debug: Enables debug logging to stderr. This includes full request/response details and logs regarding retry attempts (e.g., on 429 throttling or 5xx errors). """ def __init__( @@ -259,13 +256,13 @@ def default( """Create a default client config object. Args: - project (str): CDF Project name. - cdf_cluster (str): The CDF cluster where the CDF project is located. - credentials (CredentialProvider): Credentials. e.g. Token, ClientCredentials. - client_name (str | None): A user-defined name for the client. Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. + project: CDF Project name. + cdf_cluster: The CDF cluster where the CDF project is located. + credentials: Credentials. e.g. Token, ClientCredentials. + client_name: A user-defined name for the client. 
Used to identify the number of unique applications/scripts running on top of CDF. If this is not set, the getpass.getuser() is used instead, meaning the username you are logged in with is used. Returns: - ClientConfig: A default client config object. + A default client config object. """ return cls( @@ -280,10 +277,10 @@ def load(cls, config: dict[str, Any] | str) -> ClientConfig: """Load a client config object from a YAML/JSON string or dict. Args: - config (dict[str, Any] | str): A dictionary or YAML/JSON string containing configuration values defined in the ClientConfig class. + config: A dictionary or YAML/JSON string containing configuration values defined in the ClientConfig class. Returns: - ClientConfig: A client config object. + A client config object. Examples: diff --git a/cognite/client/credentials.py b/cognite/client/credentials.py index 6b39a6a112..178e8fd863 100644 --- a/cognite/client/credentials.py +++ b/cognite/client/credentials.py @@ -40,10 +40,10 @@ def load(cls, config: dict[str, Any] | str) -> CredentialProvider: The value of the key is a dictionary containing the configuration for the credential provider. Args: - config (dict[str, Any] | str): A dictionary or YAML/JSON string containing the configuration for the credential provider. + config: A dictionary or YAML/JSON string containing the configuration for the credential provider. Returns: - CredentialProvider: Initialized credential provider of the specified type. + Initialized credential provider of the specified type. Examples: @@ -90,7 +90,7 @@ class Token(CredentialProvider): """Token credential provider Args: - token (str | Callable[[], str]): A token or a token factory. + token: A token or a token factory. Examples: @@ -127,10 +127,10 @@ def load(cls, config: dict[str, str | Callable[[], str]] | str) -> Token: """Load a token credential provider object from a YAML/JSON string or dict. 
Args: - config (dict[str, str | Callable[[], str]] | str): A dictionary or YAML/JSON string containing configuration values defined in the Token class. + config: A dictionary or YAML/JSON string containing configuration values defined in the Token class. Returns: - Token: Initialized token credential provider. + Initialized token credential provider. Note: A callable token is not supported if passing in a yaml string. @@ -260,17 +260,16 @@ class OAuthDeviceCode(_OAuthCredentialProviderWithTokenRefresh, _WithMsalSeriali """OAuth credential provider for the device code login flow. Args: - authority_url (str | None): MS Entra OAuth authority url, typically "https://login.microsoftonline.com/{tenant_id}" - client_id (str): Your application's client id that allows device code flows. - scopes (list[str] | None): A list of scopes. - cdf_cluster (str | None): The CDF cluster where the CDF project is located. If provided, scopes will be set to - [f"https://{cdf_cluster}.cognitedata.com/IDENTITY https://{cdf_cluster}.cognitedata.com/user_impersonation openid profile"]. - oauth_discovery_url (str | None): Standard OAuth discovery URL, should be where "/.well-known/openid-configuration" is found. - token_cache_path (Path | None): Location to store token cache, defaults to os temp directory/cognitetokencache.{client_id}.bin. - token_expiry_leeway_seconds (int): The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec - clear_cache (bool): If True, the token cache will be cleared on initialization. Default: False - mem_cache_only (bool): If True, the token cache will only be stored in memory. Default: False - **token_custom_args (Any): Additional request parameters to pass to the authorization endpoint. + authority_url: MS Entra OAuth authority url, typically "https://login.microsoftonline.com/{tenant_id}" + client_id: Your application's client id that allows device code flows. + scopes: A list of scopes. 
+ cdf_cluster: The CDF cluster where the CDF project is located. If provided, scopes will be set to [f"https://{cdf_cluster}.cognitedata.com/IDENTITY https://{cdf_cluster}.cognitedata.com/user_impersonation openid profile"]. + oauth_discovery_url: Standard OAuth discovery URL, should be where "/.well-known/openid-configuration" is found. + token_cache_path: Location to store token cache, defaults to os temp directory/cognitetokencache.{client_id}.bin. + token_expiry_leeway_seconds: The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec + clear_cache: If True, the token cache will be cleared on initialization. Default: False + mem_cache_only: If True, the token cache will only be stored in memory. Default: False + **token_custom_args: Additional request parameters to pass to the authorization endpoint. Examples: >>> from cognite.client.credentials import OAuthDeviceCode @@ -409,11 +408,11 @@ def _get_device_code_response(self, device_auth_endpoint: str, data: dict[str, A """Initiate device code flow and return the device flow object. Args: - device_auth_endpoint (str): The device authorization endpoint URL. - data (dict[str, Any]): The request data (scope, client_id, etc.). + device_auth_endpoint: The device authorization endpoint URL. + data: The request data (scope, client_id, etc.). Returns: - dict[str, Any]: The device flow object containing device_code, user_code, etc. + The device flow object containing device_code, user_code, etc. """ try: @@ -534,10 +533,10 @@ def load(cls, config: dict[str, Any] | str) -> OAuthDeviceCode: """Load a OAuth device code credential provider object from a YAML/JSON string or dict. Args: - config (dict[str, Any] | str): A dictionary or YAML/JSON string containing configuration values defined in the OAuthDeviceCode class. + config: A dictionary or YAML/JSON string containing configuration values defined in the OAuthDeviceCode class. 
Returns: - OAuthDeviceCode: Initialized OAuthDeviceCode credential provider. + Initialized OAuthDeviceCode credential provider. Examples: @@ -588,15 +587,15 @@ def default_for_entra_id( * Scopes: [f"https://{cdf_cluster}.cognitedata.com/.default"] Args: - tenant_id (str): The Azure tenant id - client_id (str): An app registration that allows device code flow. - cdf_cluster (str): The CDF cluster where the CDF project is located. - token_cache_path (Path | None): Location to store token cache, defaults to os temp directory/cognitetokencache.{client_id}.bin. - token_expiry_leeway_seconds (int): The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec - clear_cache (bool): If True, the token cache will be cleared on initialization. Default: False - mem_cache_only (bool): If True, the token cache will only be stored in memory. Default: False + tenant_id: The Azure tenant id + client_id: An app registration that allows device code flow. + cdf_cluster: The CDF cluster where the CDF project is located. + token_cache_path: Location to store token cache, defaults to os temp directory/cognitetokencache.{client_id}.bin. + token_expiry_leeway_seconds: The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec + clear_cache: If True, the token cache will be cleared on initialization. Default: False + mem_cache_only: If True, the token cache will only be stored in memory. Default: False Returns: - OAuthDeviceCode: An OAuthDeviceCode instance + An OAuthDeviceCode instance """ return cls( authority_url=f"https://login.microsoftonline.com/{tenant_id}", @@ -636,12 +635,12 @@ class OAuthInteractive(_OAuthCredentialProviderWithTokenRefresh, _WithMsalSerial Make sure you have http://localhost:port in Redirect URI in App Registration as type "Mobile and desktop applications". Args: - authority_url (str): OAuth authority url - client_id (str): Your application's client id. 
- scopes (list[str]): A list of scopes. - redirect_port (int): Redirect port defaults to 53000. - token_cache_path (Path | None): Location to store token cache, defaults to os temp directory/cognitetokencache.{client_id}.bin. - token_expiry_leeway_seconds (int): The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec + authority_url: OAuth authority url + client_id: Your application's client id. + scopes: A list of scopes. + redirect_port: Redirect port defaults to 53000. + token_cache_path: Location to store token cache, defaults to os temp directory/cognitetokencache.{client_id}.bin. + token_expiry_leeway_seconds: The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec Examples: @@ -714,10 +713,10 @@ def load(cls, config: dict[str, Any] | str) -> OAuthInteractive: """Load a OAuth interactive credential provider object from a YAML/JSON string or dict. Args: - config (dict[str, Any] | str): A dictionary or YAML/JSON string containing configuration values defined in the OAuthInteractive class. + config: A dictionary or YAML/JSON string containing configuration values defined in the OAuthInteractive class. Returns: - OAuthInteractive: Initialized OAuthInteractive credential provider. + Initialized OAuthInteractive credential provider. Examples: @@ -760,14 +759,14 @@ def default_for_entra_id( * Scopes: [f"https://{cdf_cluster}.cognitedata.com/.default"] Args: - tenant_id (str): The Azure tenant id - client_id (str): Your application's client id. - cdf_cluster (str): The CDF cluster where the CDF project is located. - token_expiry_leeway_seconds (int): The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec - **token_custom_args (Any): Optional additional arguments to pass as query parameters to the token fetch request. + tenant_id: The Azure tenant id + client_id: Your application's client id. 
+ cdf_cluster: The CDF cluster where the CDF project is located. + token_expiry_leeway_seconds: The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec + **token_custom_args: Optional additional arguments to pass as query parameters to the token fetch request. Returns: - OAuthInteractive: An OAuthInteractive instance + An OAuthInteractive instance """ return cls( authority_url=f"https://login.microsoftonline.com/{tenant_id}", @@ -797,12 +796,12 @@ class OAuthClientCredentials(_OAuthCredentialProviderWithTokenRefresh): """OAuth credential provider for the "Client Credentials" flow. Args: - token_url (str): OAuth token url - client_id (str): Your application's client id. - client_secret (str): Your application's client secret - scopes (list[str] | None): A list of scopes. - token_expiry_leeway_seconds (int): The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec - **token_custom_args (Any): Optional additional arguments to pass as query parameters to the token fetch request. + token_url: OAuth token url + client_id: Your application's client id. + client_secret: Your application's client secret + scopes: A list of scopes. + token_expiry_leeway_seconds: The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec + **token_custom_args: Optional additional arguments to pass as query parameters to the token fetch request. Examples: @@ -898,10 +897,10 @@ def load(cls, config: dict[str, Any] | str) -> OAuthClientCredentials: """Load a OAuth client credentials credential provider object from a YAML/JSON string or dict. Args: - config (dict[str, Any] | str): A dictionary or YAML/JSON string containing configuration values defined in the OAuthClientCredentials class. + config: A dictionary or YAML/JSON string containing configuration values defined in the OAuthClientCredentials class. 
Returns: - OAuthClientCredentials: Initialized OAuthClientCredentials credential provider. + Initialized OAuthClientCredentials credential provider. Examples: @@ -947,15 +946,15 @@ def default_for_entra_id( * Scopes: [f"https://{cdf_cluster}.cognitedata.com/.default"] Args: - tenant_id (str): The Azure tenant id - client_id (str): Your application's client id. - client_secret (str): Your application's client secret. - cdf_cluster (str): The CDF cluster where the CDF project is located. - token_expiry_leeway_seconds (int): The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec - **token_custom_args (Any): Optional additional arguments to pass as query parameters to the token fetch request. + tenant_id: The Azure tenant id + client_id: Your application's client id. + client_secret: Your application's client secret. + cdf_cluster: The CDF cluster where the CDF project is located. + token_expiry_leeway_seconds: The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec + **token_custom_args: Optional additional arguments to pass as query parameters to the token fetch request. Returns: - OAuthClientCredentials: An OAuthClientCredentials instance + An OAuthClientCredentials instance """ return cls( token_url=f"https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token", @@ -986,12 +985,12 @@ class OAuthClientCertificate(_OAuthCredentialProviderWithTokenRefresh): """OAuth credential provider for authenticating with a client certificate. Args: - authority_url (str): OAuth authority url - client_id (str): Your application's client id. - cert_thumbprint (str): Your certificate's thumbprint. You get it when you upload your certificate to Azure AD. - certificate (str): Your private certificate, typically read from a .pem file - scopes (list[str]): A list of scopes. 
- token_expiry_leeway_seconds (int): The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec + authority_url: OAuth authority url + client_id: Your application's client id. + cert_thumbprint: Your certificate's thumbprint. You get it when you upload your certificate to Azure AD. + certificate: Your private certificate, typically read from a .pem file + scopes: A list of scopes. + token_expiry_leeway_seconds: The token is refreshed at the earliest when this number of seconds is left before expiry. Default: 30 sec Examples: @@ -1059,10 +1058,10 @@ def load(cls, config: dict[str, Any] | str) -> OAuthClientCertificate: """Load a OAuth client certificate credential provider object from a YAML/JSON string or dict. Args: - config (dict[str, Any] | str): A dictionary or YAML/JSON string containing configuration values defined in the OAuthClientCertificate class. + config: A dictionary or YAML/JSON string containing configuration values defined in the OAuthClientCertificate class. Returns: - OAuthClientCertificate: Initialized OAuthClientCertificate credential provider. + Initialized OAuthClientCertificate credential provider. Examples: diff --git a/cognite/client/data_classes/_base.py b/cognite/client/data_classes/_base.py index c6d8fcb60d..0b3e5017f2 100644 --- a/cognite/client/data_classes/_base.py +++ b/cognite/client/data_classes/_base.py @@ -89,10 +89,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representation of the instance. + A dictionary representation of the instance. """ return basic_instance_dump(self, camel_case=camel_case) @@ -100,7 +100,7 @@ def dump_yaml(self) -> str: """Dump the instance into a YAML formatted string. 
Returns: - str: A YAML formatted string representing the instance. + A YAML formatted string representing the instance. """ yaml = local_import("yaml") return yaml.safe_dump(self.dump(camel_case=True), sort_keys=False) @@ -122,10 +122,10 @@ def _load(cls, resource: dict[str, Any]) -> Self: Subclasses must implement this method to handle their specific resource loading logic. Args: - resource (dict[str, Any]): The resource to load. + resource: The resource to load. Returns: - Self: The loaded resource. + The loaded resource. """ raise NotImplementedError @@ -147,14 +147,14 @@ def to_pandas( """Convert the instance into a pandas DataFrame. Args: - expand_metadata (bool): Expand the metadata into separate rows (default: False). - metadata_prefix (str): Prefix to use for the metadata rows, if expanded. - ignore (list[str] | None): List of row keys to skip when converting to a data frame. Is applied before expansions. - camel_case (bool): Convert attribute names to camel case (e.g. `externalId` instead of `external_id`). Does not affect custom data like metadata if expanded. - convert_timestamps (bool): Convert known attributes storing CDF timestamps (milliseconds since epoch) to datetime. Does not affect custom data like metadata. + expand_metadata: Expand the metadata into separate rows (default: False). + metadata_prefix: Prefix to use for the metadata rows, if expanded. + ignore: List of row keys to skip when converting to a data frame. Is applied before expansions. + camel_case: Convert attribute names to camel case (e.g. `externalId` instead of `external_id`). Does not affect custom data like metadata if expanded. + convert_timestamps: Convert known attributes storing CDF timestamps (milliseconds since epoch) to datetime. Does not affect custom data like metadata. Returns: - pandas.DataFrame: The dataframe. + The dataframe. 
""" pd = local_import("pandas") @@ -303,10 +303,10 @@ def dump(self, camel_case: bool = True) -> list[dict[str, Any]]: """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - list[dict[str, Any]]: A list of dicts representing the instance. + A list of dicts representing the instance. """ return [resource.dump(camel_case) for resource in self.data] @@ -314,7 +314,7 @@ def dump_yaml(self) -> str: """Dump the instances into a YAML formatted string. Returns: - str: A YAML formatted string representing the instances. + A YAML formatted string representing the instances. """ yaml = local_import("yaml") return yaml.safe_dump(self.dump(camel_case=True), sort_keys=False) @@ -328,12 +328,12 @@ def get( """Get an item from this list by id, external_id or instance_id. Args: - id (int | None): The id of the item to get. - external_id (str | None): The external_id of the item to get. - instance_id (InstanceId | tuple[str, str] | None): The instance_id of the item to get. + id: The id of the item to get. + external_id: The external_id of the item to get. + instance_id: The instance_id of the item to get. Returns: - T_CogniteResource | None: The requested item if present, otherwise None. + The requested item if present, otherwise None. """ (ident := IdentifierSequence.load(id, external_id, instance_id)).assert_singleton() if id: @@ -354,13 +354,13 @@ def to_pandas( keys in the metadata that already exist in the DataFrame, then an error will be raised by pd.join. Args: - camel_case (bool): Convert column names to camel case (e.g. `externalId` instead of `external_id`) - expand_metadata (bool): Expand the metadata column into separate columns. - metadata_prefix (str): Prefix to use for metadata columns. - convert_timestamps (bool): Convert known columns storing CDF timestamps (milliseconds since epoch) to datetime. 
Does not affect custom data like metadata. + camel_case: Convert column names to camel case (e.g. `externalId` instead of `external_id`) + expand_metadata: Expand the metadata column into separate columns. + metadata_prefix: Prefix to use for metadata columns. + convert_timestamps: Convert known columns storing CDF timestamps (milliseconds since epoch) to datetime. Does not affect custom data like metadata. Returns: - pandas.DataFrame: The Cognite resource as a dataframe. + The Cognite resource as a dataframe. """ pd = local_import("pandas") from cognite.client.utils._pandas_helpers import ( @@ -419,10 +419,10 @@ def dump_raw(self, camel_case: bool = True) -> dict[str, Any]: """This method dumps the list with extra information in addition to the items. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representation of the list. + A dictionary representation of the list. """ return {"items": [resource.dump(camel_case) for resource in self.data]} @@ -581,10 +581,10 @@ def dump(self, camel_case: Literal[True] = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (Literal[True]): No description. + camel_case: No description. Returns: - dict[str, Any]: A dictionary representation of the instance. + A dictionary representation of the instance. """ dumped: dict[str, Any] = {"update": self._update_object} if self._id is not None: @@ -694,10 +694,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representation of the instance. + A dictionary representation of the instance. 
""" return basic_instance_dump(self, camel_case=camel_case) @@ -831,7 +831,7 @@ def as_external_ids(self) -> list[str]: ValueError: If any resource in the list does not have an external id. Returns: - list[str]: The external ids of all resources in the list. + The external ids of all resources in the list. """ external_ids: list[str] = [] for x in self: @@ -850,7 +850,7 @@ def as_names(self) -> list[str]: ValueError: If any resource in the list does not have a name. Returns: - list[str]: The names of all resources in the list. + The names of all resources in the list. """ names: list[str] = [] for x in self: @@ -869,7 +869,7 @@ def as_ids(self) -> list[int]: ValueError: If any resource in the list does not have an id. Returns: - list[int]: The ids of all resources in the list. + The ids of all resources in the list. """ ids: list[int] = [] for x in self: diff --git a/cognite/client/data_classes/agents/agent_tools.py b/cognite/client/data_classes/agents/agent_tools.py index afe27c4472..7edf710caf 100644 --- a/cognite/client/data_classes/agents/agent_tools.py +++ b/cognite/client/data_classes/agents/agent_tools.py @@ -17,8 +17,8 @@ class AgentToolCore(WriteableCogniteResource["AgentToolUpsert"], ABC): """Core representation of an AI Agent Tool in CDF. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. """ _type: ClassVar[str] # Will be set by concrete classes @@ -39,8 +39,8 @@ class AgentToolUpsert(AgentToolCore, ABC): """The write format of an agent tool. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. 
Used by the agent to decide when to use this tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. """ def dump(self, camel_case: bool = True) -> dict[str, Any]: @@ -66,8 +66,8 @@ class AgentTool(AgentToolCore, ABC): """The read format of an agent tool. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. """ def dump(self, camel_case: bool = True) -> dict[str, Any]: @@ -105,10 +105,10 @@ class DataModelInfo(WriteableCogniteResource): """Information about a data model used in knowledge graph queries. Args: - space (str): The space of the data model. - external_id (str): The external ID of the data model. - version (str): The version of the data model. - view_external_ids (Sequence[str] | None): The external IDs of the views of the data model. + space: The space of the data model. + external_id: The external ID of the data model. + version: The version of the data model. + view_external_ids: The external IDs of the views of the data model. """ space: str @@ -138,8 +138,8 @@ class InstanceSpaces(WriteableCogniteResource): """Configuration for instance spaces in knowledge graph queries. Args: - type (Literal["manual", "all"]): The type of instance spaces. - spaces (Sequence[str] | None): The spaces of the instance spaces. + type: The type of instance spaces. + spaces: The spaces of the instance spaces. """ type: Literal["manual", "all"] @@ -161,9 +161,9 @@ class QueryKnowledgeGraphAgentToolConfiguration(WriteableCogniteResource): """Configuration for knowledge graph query agent tools. 
Args: - data_models (Sequence[DataModelInfo]): The data models and views to query. - instance_spaces (InstanceSpaces | None): The instance spaces to query. - version (str | None): The version of the query generation strategy to use. A higher number does not necessarily mean a better query. Supported values are "v1" and "v2". + data_models: The data models and views to query. + instance_spaces: The instance spaces to query. + version: The version of the query generation strategy to use. A higher number does not necessarily mean a better query. Supported values are "v1" and "v2". """ data_models: Sequence[DataModelInfo] @@ -199,8 +199,8 @@ class SummarizeDocumentAgentTool(AgentTool): """Agent tool for summarizing documents. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. """ _type: ClassVar[str] = "summarizeDocument" @@ -224,8 +224,8 @@ class SummarizeDocumentAgentToolUpsert(AgentToolUpsert): """Upsert version of document summarization agent tool. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. """ _type: ClassVar[str] = "summarizeDocument" @@ -243,8 +243,8 @@ class AskDocumentAgentTool(AgentTool): """Agent tool for asking questions about documents. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. 
- description (str): The description of the agent tool. Used by the agent to decide when to use this tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. """ _type: ClassVar[str] = "askDocument" @@ -268,8 +268,8 @@ class AskDocumentAgentToolUpsert(AgentToolUpsert): """Upsert version of document question agent tool. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. """ _type: ClassVar[str] = "askDocument" @@ -287,9 +287,9 @@ class QueryKnowledgeGraphAgentTool(AgentTool): """Agent tool for querying knowledge graphs. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. - configuration (QueryKnowledgeGraphAgentToolConfiguration | None): The configuration of the knowledge graph query agent tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. + configuration: The configuration of the knowledge graph query agent tool. """ _type: ClassVar[str] = "queryKnowledgeGraph" @@ -322,9 +322,9 @@ class QueryKnowledgeGraphAgentToolUpsert(AgentToolUpsert): """Upsert version of knowledge graph query agent tool. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. 
- configuration (QueryKnowledgeGraphAgentToolConfiguration | None): The configuration of the knowledge graph query agent tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. + configuration: The configuration of the knowledge graph query agent tool. """ _type: ClassVar[str] = "queryKnowledgeGraph" @@ -350,8 +350,8 @@ class QueryTimeSeriesDatapointsAgentTool(AgentTool): """Agent tool for querying time series datapoints. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. """ _type: ClassVar[str] = "queryTimeSeriesDatapoints" @@ -375,8 +375,8 @@ class QueryTimeSeriesDatapointsAgentToolUpsert(AgentToolUpsert): """Upsert version of time series datapoints query agent tool. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. """ _type: ClassVar[str] = "queryTimeSeriesDatapoints" @@ -394,10 +394,10 @@ class UnknownAgentTool(AgentTool): """Agent tool for unknown/unrecognized tool types. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. - type (str): The type of the agent tool. 
- configuration (dict[str, Any] | None): The configuration of the agent tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. + type: The type of the agent tool. + configuration: The configuration of the agent tool. """ type: str @@ -426,10 +426,10 @@ class UnknownAgentToolUpsert(AgentToolUpsert): """Upsert version of unknown agent tool. Args: - name (str): The name of the agent tool. Used by the agent to decide when to use this tool. - type (str): The type of the agent tool. - description (str): The description of the agent tool. Used by the agent to decide when to use this tool. - configuration (dict[str, Any] | None): The configuration of the agent tool. + name: The name of the agent tool. Used by the agent to decide when to use this tool. + type: The type of the agent tool. + description: The description of the agent tool. Used by the agent to decide when to use this tool. + configuration: The configuration of the agent tool. """ type: str diff --git a/cognite/client/data_classes/agents/agents.py b/cognite/client/data_classes/agents/agents.py index 95ee4c9897..30366fa3c5 100644 --- a/cognite/client/data_classes/agents/agents.py +++ b/cognite/client/data_classes/agents/agents.py @@ -23,12 +23,12 @@ class AgentCore(WriteableCogniteResource["AgentUpsert"]): """Core representation of an AI agent. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str): The name of the agent. - description (str | None): The description of the agent. - instructions (str | None): Instructions for the agent. - model (str | None): Name of the language model to use. For example, "azure/gpt-4o", "gcp/gemini-2.0" or "aws/claude-3.5-sonnet". - labels (list[str] | None): Labels for the agent. For example, ["published"] to mark an agent as published. 
+ external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the agent. + description: The description of the agent. + instructions: Instructions for the agent. + model: Name of the language model to use. For example, "azure/gpt-4o", "gcp/gemini-2.0" or "aws/claude-3.5-sonnet". + labels: Labels for the agent. For example, ["published"] to mark an agent as published. """ external_id: str @@ -44,13 +44,13 @@ class AgentUpsert(AgentCore): This is the write format of an agent. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str): The name of the agent, for use in user interfaces. - description (str | None): The human readable description of the agent. - instructions (str | None): Instructions for the agent. - model (str | None): Name of the language model to use. For example, "azure/gpt-4o", "gcp/gemini-2.0" or "aws/claude-3.5-sonnet". - labels (list[str] | None): Labels for the agent. For example, ["published"] to mark an agent as published. - tools (Sequence[AgentToolUpsert] | None): List of tools for the agent. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the agent, for use in user interfaces. + description: The human readable description of the agent. + instructions: Instructions for the agent. + model: Name of the language model to use. For example, "azure/gpt-4o", "gcp/gemini-2.0" or "aws/claude-3.5-sonnet". + labels: Labels for the agent. For example, ["published"] to mark an agent as published. + tools: List of tools for the agent. """ @@ -117,16 +117,16 @@ class Agent(AgentCore): This is the read format of an agent. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str): The name of the agent, for use in user interfaces. - description (str | None): The human readable description of the agent. 
Always present in API responses. - instructions (str | None): Instructions for the agent. Always present in API responses. - model (str | None): Name of the language model to use. For example, "azure/gpt-4o", "gcp/gemini-2.0" or "aws/claude-3.5-sonnet". Always present in API responses. - labels (list[str] | None): Labels for the agent. For example, ["published"] to mark an agent as published. Always present in API responses. - tools (Sequence[AgentTool] | None): List of tools for the agent. - created_time (int): The time the agent was created, in milliseconds since Thursday, 1 January 1970 00:00:00 UTC, minus leap seconds. - last_updated_time (int): The time the agent was last updated, in milliseconds since Thursday, 1 January 1970 00:00:00 UTC, minus leap seconds. - owner_id (str | None): The ID of the user who owns the agent. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the agent, for use in user interfaces. + description: The human readable description of the agent. Always present in API responses. + instructions: Instructions for the agent. Always present in API responses. + model: Name of the language model to use. For example, "azure/gpt-4o", "gcp/gemini-2.0" or "aws/claude-3.5-sonnet". Always present in API responses. + labels: Labels for the agent. For example, ["published"] to mark an agent as published. Always present in API responses. + tools: List of tools for the agent. + created_time: The time the agent was created, in milliseconds since Thursday, 1 January 1970 00:00:00 UTC, minus leap seconds. + last_updated_time: The time the agent was last updated, in milliseconds since Thursday, 1 January 1970 00:00:00 UTC, minus leap seconds. + owner_id: The ID of the user who owns the agent. 
""" def __init__( diff --git a/cognite/client/data_classes/agents/chat.py b/cognite/client/data_classes/agents/chat.py index d7b1f98e31..0f43b9b716 100644 --- a/cognite/client/data_classes/agents/chat.py +++ b/cognite/client/data_classes/agents/chat.py @@ -40,7 +40,7 @@ class TextContent(MessageContent): """Text content for messages. Args: - text (str): The text content. + text: The text content. """ _type: ClassVar[str] = "text" @@ -56,8 +56,8 @@ class UnknownContent(MessageContent): """Unknown content type for forward compatibility. Args: - data (dict[str, Any]): The raw content data. - type (str): The content type. + data: The raw content data. + type: The content type. """ type: str @@ -107,9 +107,9 @@ class ClientToolAction(Action): """A client-side tool definition that can be called by the agent. Args: - name (str): The name of the client tool to call. - description (str): A description of what the function does. The language model will use this description when selecting the function and interpreting its parameters. - parameters (dict[str, object]): The parameters the function accepts, described as a JSON Schema object. + name: The name of the client tool to call. + description: A description of what the function does. The language model will use this description when selecting the function and interpreting its parameters. + parameters: The parameters the function accepts, described as a JSON Schema object. """ _type: ClassVar[str] = "clientTool" @@ -144,8 +144,8 @@ class UnknownAction(Action): """Unknown action type for forward compatibility. Args: - type (str): The action type. - data (dict[str, object]): The raw action data. + type: The action type. + data: The raw action data. """ type: str @@ -196,9 +196,9 @@ class ClientToolCall(ActionCall): """A client tool call requested by the agent. Args: - action_id (str): The unique identifier for this action call. - name (str): The name of the client tool being called. 
- arguments (dict[str, object]): The parsed arguments for the tool call. + action_id: The unique identifier for this action call. + name: The name of the client tool being called. + arguments: The parsed arguments for the tool call. """ _type: ClassVar[str] = "clientTool" @@ -232,13 +232,13 @@ class ToolConfirmationCall(ActionCall): """A tool confirmation request from the agent. Args: - action_id (str): The unique identifier for this action call. - content (MessageContent): The confirmation message content. - tool_name (str): The name of the tool requiring confirmation. - tool_arguments (dict[str, object]): The arguments for the tool call. - tool_description (str): Description of what the tool does. - tool_type (str): The type of tool (e.g., "runPythonCode", "callRestApi"). - details (dict[str, object] | None): Optional additional details about the tool call. + action_id: The unique identifier for this action call. + content: The confirmation message content. + tool_name: The name of the tool requiring confirmation. + tool_arguments: The arguments for the tool call. + tool_description: Description of what the tool does. + tool_type: The type of tool (e.g., "runPythonCode", "callRestApi"). + details: Optional additional details about the tool call. """ _type: ClassVar[str] = "toolConfirmation" @@ -286,9 +286,9 @@ class UnknownActionCall(ActionCall): """Unknown action call type for forward compatibility. Args: - action_id (str): The unique identifier for this action call. - type (str): The action call type. - data (dict[str, object]): The raw action call data. + action_id: The unique identifier for this action call. + type: The action call type. + data: The raw action call data. """ action_id: str @@ -321,9 +321,9 @@ class Message(CogniteResource): """A message to send to an agent. Args: - content (str | MessageContent): The message content. If a string is provided, + content: The message content. If a string is provided, it will be converted to TextContent. 
- role (Literal["user"]): The role of the message sender. Defaults to "user". + role: The role of the message sender. Defaults to "user". """ content: MessageContent @@ -368,9 +368,9 @@ class ClientToolResult(ActionResult): """Result of executing a client tool, for sending back to the agent. Args: - action_id (str): The ID of the action being responded to. - content (str | MessageContent): The result of executing the action. - data (list[Any] | None): Optional structured data. + action_id: The ID of the action being responded to. + content: The result of executing the action. + data: Optional structured data. """ _type: ClassVar[str] = "clientTool" @@ -407,8 +407,8 @@ class ToolConfirmationResult(ActionResult): """Result of a tool confirmation request. Args: - action_id (str): The ID of the action being responded to. - status (Literal["ALLOW", "DENY"]): Whether to allow or deny the tool execution. + action_id: The ID of the action being responded to. + status: Whether to allow or deny the tool execution. """ _type: ClassVar[str] = "toolConfirmation" @@ -436,8 +436,8 @@ class AgentDataItem(CogniteResource): """Data item in agent response. Args: - type (str): The type of data item. - data (dict[str, Any]): The data payload. + type: The type of data item. + data: The data payload. """ type: str @@ -461,7 +461,7 @@ class AgentReasoningItem(CogniteResource): """Reasoning item in agent response. Args: - content (list[MessageContent]): The reasoning content. + content: The reasoning content. """ content: list[MessageContent] @@ -482,11 +482,11 @@ class AgentMessage(CogniteResource): """A message from an agent. Args: - content (MessageContent | None): The message content. - data (list[AgentDataItem] | None): Data items in the response. - reasoning (list[AgentReasoningItem] | None): Reasoning items in the response. - actions (list[ActionCall] | None): Action calls requested by the agent. - role (Literal["agent"]): The role of the message sender. 
+ content: The message content. + data: Data items in the response. + reasoning: Reasoning items in the response. + actions: Action calls requested by the agent. + role: The role of the message sender. """ content: MessageContent | None = None @@ -531,10 +531,10 @@ class AgentChatResponse(CogniteResource): """Response from agent chat. Args: - agent_external_id (str): The external ID of the agent. - messages (AgentMessageList): The response messages from the agent. - type (str): The response type. - cursor (str | None): Cursor for conversation continuation. + agent_external_id: The external ID of the agent. + messages: The response messages from the agent. + type: The response type. + cursor: Cursor for conversation continuation. """ def __init__( diff --git a/cognite/client/data_classes/ai.py b/cognite/client/data_classes/ai.py index 0e526bc7e4..14382f08c5 100644 --- a/cognite/client/data_classes/ai.py +++ b/cognite/client/data_classes/ai.py @@ -31,10 +31,10 @@ class Summary: A summary object consisting of a textual summary plus the id of the summarized document Args: - summary (str): The textual summary of the document - id (int | None): The id of the document - external_id (str | None): The external id of the document - instance_id (NodeId| None): The instance id of the document + summary: The textual summary of the document + id: The id of the document + external_id: The external id of the document + instance_id: The instance id of the document """ summary: str @@ -61,11 +61,11 @@ class AnswerLocation: specifies exactly where inside a document an answer can be found. 
Args: - page_number (int): Page number, starting with 1 - left (float): Leftmost edge of the bounding box - right (float): Rightmost edge of the bounding box - top (float): Topmost edge of the bounding box - bottom (float): Bottommost edge of the bounding box + page_number: Page number, starting with 1 + left: Leftmost edge of the bounding box + right: Rightmost edge of the bounding box + top: Topmost edge of the bounding box + bottom: Bottommost edge of the bounding box """ page_number: int @@ -99,11 +99,11 @@ class AnswerReference: where the answer was found. Args: - file_id (int): The internal id of the document - external_id (str | None): The external id of the document - instance_id (NodeId | None): The instance id of the document - file_name (str): The name of the document - locations (list[AnswerLocation]): A list of locations within the document, where the answer was found + file_id: The internal id of the document + external_id: The external id of the document + instance_id: The instance id of the document + file_name: The name of the document + locations: A list of locations within the document, where the answer was found """ file_id: int @@ -135,8 +135,8 @@ class AnswerContent: the documents containing the source material for the answer. Args: - text (str): The extracted plain text - content (list[AnswerReference]): The list of references + text: The extracted plain text + content: The list of references """ text: str @@ -163,7 +163,7 @@ class Answer: of the piece of text the answer was constructed from. Args: - content (list[AnswerContent]): The list of content objects. + content: The list of content objects. 
""" content: list[AnswerContent] diff --git a/cognite/client/data_classes/annotation_types/primitives.py b/cognite/client/data_classes/annotation_types/primitives.py index 20fd608c9a..a131da81e2 100644 --- a/cognite/client/data_classes/annotation_types/primitives.py +++ b/cognite/client/data_classes/annotation_types/primitives.py @@ -19,10 +19,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representation of the instance. + A dictionary representation of the instance. """ dumped = {} for k, v in vars(self).items(): @@ -42,10 +42,10 @@ def to_pandas(self, camel_case: bool = False) -> pandas.DataFrame: # type: igno """Convert the instance into a pandas DataFrame. Args: - camel_case (bool): Convert column names to camel case (e.g. `externalId` instead of `external_id`) + camel_case: Convert column names to camel case (e.g. `externalId` instead of `external_id`) Returns: - pandas.DataFrame: The dataframe. + The dataframe. """ pd = local_import("pandas") return pd.Series(self.dump(camel_case), name="value").to_frame() diff --git a/cognite/client/data_classes/annotations.py b/cognite/client/data_classes/annotations.py index 9e6cf7faa6..56005432d8 100644 --- a/cognite/client/data_classes/annotations.py +++ b/cognite/client/data_classes/annotations.py @@ -41,14 +41,14 @@ class AnnotationCore(WriteableCogniteResource["AnnotationWrite"], ABC): """Representation of an annotation in CDF. Args: - annotation_type (str): The type of the annotation. This uniquely decides what the structure of the 'data' block will be. - data (dict): The annotation information. The format of this object is decided by and validated against the 'annotation_type' attribute. - status (str): The status of the annotation, e.g. 
"suggested", "approved", "rejected". - creating_app (str): The name of the app from which this annotation was created. - creating_app_version (str): The version of the app that created this annotation. Must be a valid semantic versioning (SemVer) string. - creating_user (str): (str, optional): A username, or email, or name. This is not checked nor enforced. If the value is None, it means the annotation was created by a service. - annotated_resource_type (str): Type name of the CDF resource that is annotated, e.g. "file". - annotated_resource_id (int): The internal ID of the annotated resource. + annotation_type: The type of the annotation. This uniquely decides what the structure of the 'data' block will be. + data: The annotation information. The format of this object is decided by and validated against the 'annotation_type' attribute. + status: The status of the annotation, e.g. "suggested", "approved", "rejected". + creating_app: The name of the app from which this annotation was created. + creating_app_version: The version of the app that created this annotation. Must be a valid semantic versioning (SemVer) string. + creating_user: A username, or email, or name. This is not checked nor enforced. If the value is None, it means the annotation was created by a service. + annotated_resource_type: Type name of the CDF resource that is annotated, e.g. "file". + annotated_resource_id: The internal ID of the annotated resource. """ def __init__( @@ -84,17 +84,17 @@ class Annotation(AnnotationCore): This is the read version of the Annotation class. It is never to be used when creating new annotations. Args: - id (int): A server-generated ID for the object. - created_time (int): The timestamp for when the annotation was created, in milliseconds since epoch. - last_updated_time (int): The timestamp for when the annotation was last updated, in milliseconds since epoch. - annotation_type (str): The type of the annotation. 
This uniquely decides what the structure of the 'data' block will be. - data (dict): The annotation information. The format of this object is decided by and validated against the 'annotation_type' attribute. - status (str): The status of the annotation, e.g. "suggested", "approved", "rejected". - creating_app (str): The name of the app from which this annotation was created. - creating_app_version (str): The version of the app that created this annotation. Must be a valid semantic versioning (SemVer) string. - creating_user (str): (str, optional): A username, or email, or name. This is not checked nor enforced. If the value is None, it means the annotation was created by a service. - annotated_resource_type (str): Type name of the CDF resource that is annotated, e.g. "file". - annotated_resource_id (int): The internal ID of the annotated resource. + id: A server-generated ID for the object. + created_time: The timestamp for when the annotation was created, in milliseconds since epoch. + last_updated_time: The timestamp for when the annotation was last updated, in milliseconds since epoch. + annotation_type: The type of the annotation. This uniquely decides what the structure of the 'data' block will be. + data: The annotation information. The format of this object is decided by and validated against the 'annotation_type' attribute. + status: The status of the annotation, e.g. "suggested", "approved", "rejected". + creating_app: The name of the app from which this annotation was created. + creating_app_version: The version of the app that created this annotation. Must be a valid semantic versioning (SemVer) string. + creating_user: A username, or email, or name. This is not checked nor enforced. If the value is None, it means the annotation was created by a service. + annotated_resource_type: Type name of the CDF resource that is annotated, e.g. "file". + annotated_resource_id: The internal ID of the annotated resource. 
""" def __init__( @@ -162,14 +162,14 @@ class AnnotationWrite(AnnotationCore): This is the write version of the Annotation class. It is used when creating new annotations. Args: - annotation_type (AnnotationType): The type of the annotation. This uniquely decides what the structure of the 'data' block will be. - data (dict): The annotation information. The format of this object is decided by and validated against the 'annotation_type' attribute. - status (Literal['suggested', 'approved', 'rejected']): The status of the annotation, e.g. "suggested", "approved", "rejected". - creating_app (str): The name of the app from which this annotation was created. - creating_app_version (str): The version of the app that created this annotation. Must be a valid semantic versioning (SemVer) string. - creating_user (str): A username, or email, or name. This is not checked nor enforced. If the value is None, it means the annotation was created by a service. - annotated_resource_type (Literal['file', 'threedmodel']): Type name of the CDF resource that is annotated, e.g. "file". - annotated_resource_id (int): The internal ID of the annotated resource. + annotation_type: The type of the annotation. This uniquely decides what the structure of the 'data' block will be. + data: The annotation information. The format of this object is decided by and validated against the 'annotation_type' attribute. + status: The status of the annotation, e.g. "suggested", "approved", "rejected". + creating_app: The name of the app from which this annotation was created. + creating_app_version: The version of the app that created this annotation. Must be a valid semantic versioning (SemVer) string. + creating_user: A username, or email, or name. This is not checked nor enforced. If the value is None, it means the annotation was created by a service. + annotated_resource_type: Type name of the CDF resource that is annotated, e.g. "file". + annotated_resource_id: The internal ID of the annotated resource. 
""" def __init__( @@ -216,13 +216,13 @@ class AnnotationReverseLookupFilter(CogniteFilter): """Filter on annotations with various criteria Args: - annotated_resource_type (str): The type of the CDF resource that is annotated, e.g. "file". - status (str | None): Status of annotations to filter for, e.g. "suggested", "approved", "rejected". - creating_user (str | None): Name of the user who created the annotations to filter for. Can be set explicitly to "None" to filter for annotations created by a service. - creating_app (str | None): Name of the app from which the annotations to filter for where created. - creating_app_version (str | None): Version of the app from which the annotations to filter for were created. - annotation_type (str | None): Type name of the annotations. - data (dict[str, Any] | None): The annotation data to filter by. Example format: {"label": "cat", "confidence": 0.9} + annotated_resource_type: The type of the CDF resource that is annotated, e.g. "file". + status: Status of annotations to filter for, e.g. "suggested", "approved", "rejected". + creating_user: Name of the user who created the annotations to filter for. Can be set explicitly to "None" to filter for annotations created by a service. + creating_app: Name of the app from which the annotations to filter for where created. + creating_app_version: Version of the app from which the annotations to filter for were created. + annotation_type: Type name of the annotations. + data: The annotation data to filter by. Example format: {"label": "cat", "confidence": 0.9} """ def __init__( @@ -260,14 +260,14 @@ class AnnotationFilter(AnnotationReverseLookupFilter): """Filter on annotations with various criteria Args: - annotated_resource_type (str): The type of the CDF resource that is annotated, e.g. "file". - annotated_resource_ids (list[dict[str, int]]): List of ids of the annotated CDF resources to filter in. Example format: [{"id": 1234}, {"id": "4567"}]. Must contain at least one item. 
- status (str | None): Status of annotations to filter for, e.g. "suggested", "approved", "rejected". - creating_user (str | None): Name of the user who created the annotations to filter for. Can be set explicitly to "None" to filter for annotations created by a service. - creating_app (str | None): Name of the app from which the annotations to filter for where created. - creating_app_version (str | None): Version of the app from which the annotations to filter for were created. - annotation_type (str | None): Type name of the annotations. - data (dict[str, Any] | None): The annotation data to filter by. Example format: {"label": "cat", "confidence": 0.9} + annotated_resource_type: The type of the CDF resource that is annotated, e.g. "file". + annotated_resource_ids: List of ids of the annotated CDF resources to filter in. Example format: [{"id": 1234}, {"id": "4567"}]. Must contain at least one item. + status: Status of annotations to filter for, e.g. "suggested", "approved", "rejected". + creating_user: Name of the user who created the annotations to filter for. Can be set explicitly to "None" to filter for annotations created by a service. + creating_app: Name of the app from which the annotations to filter for were created. + creating_app_version: Version of the app from which the annotations to filter for were created. + annotation_type: Type name of the annotations. + data: The annotation data to filter by. Example format: {"label": "cat", "confidence": 0.9} """ def __init__( @@ -297,7 +297,7 @@ class AnnotationUpdate(CogniteUpdate): """Changes applied to annotation Args: - id (int): A server-generated ID for the object. + id: A server-generated ID for the object. 
""" def __init__(self, id: int) -> None: diff --git a/cognite/client/data_classes/assets.py b/cognite/client/data_classes/assets.py index 08ec97c0ab..6e31524aa6 100644 --- a/cognite/client/data_classes/assets.py +++ b/cognite/client/data_classes/assets.py @@ -62,10 +62,10 @@ class AggregateResultItem(CogniteResource): """Aggregated metrics of the asset Args: - child_count (int | None): Number of direct descendants for the asset - depth (int | None): Asset path depth (number of levels below root node). - path (list[dict[str, Any]] | None): IDs of assets on the path to the asset. - **_ (Any): No description. + child_count: Number of direct descendants for the asset + depth: Asset path depth (number of levels below root node). + path: IDs of assets on the path to the asset. + **_: No description. """ def __init__( @@ -93,21 +93,21 @@ class Asset(WriteableCogniteResourceWithClientRef["AssetWrite"]): is the read version of the Asset class, it is used when retrieving assets from the Cognite API. Args: - id (int): A server-generated ID for the object. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - name (str | None): The name of the asset. - parent_id (int | None): The parent of the node, null if it is the root node. - parent_external_id (str | None): The external ID of the parent. The property is omitted if the asset doesn't have a parent or if the parent doesn't have externalId. - description (str | None): The description of the asset. - data_set_id (int | None): The id of the dataset this asset belongs to. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. 
Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. - source (str | None): The source of the asset. - labels (list[Label] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the asset. - root_id (int | None): ID of the root asset. - aggregates (AggregateResultItem | None): Aggregated metrics of the asset + id: A server-generated ID for the object. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the asset. + parent_id: The parent of the node, null if it is the root node. + parent_external_id: The external ID of the parent. The property is omitted if the asset doesn't have a parent or if the parent doesn't have externalId. + description: The description of the asset. + data_set_id: The id of the dataset this asset belongs to. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. + source: The source of the asset. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the asset. + root_id: ID of the root asset. + aggregates: Aggregated metrics of the asset """ def __init__( @@ -192,7 +192,7 @@ async def parent_async(self) -> Asset: """Returns this asset's parent. Returns: - Asset: The parent asset. + The parent asset. 
""" if self.parent_id is None: raise ValueError("parent_id is None, is this a root asset?") @@ -206,7 +206,7 @@ async def children_async(self) -> AssetList: """Returns the children of this asset. Returns: - AssetList: The requested assets + The requested assets """ if self.id is None: raise ValueError("Unable to fetch child assets: id is missing") @@ -220,10 +220,10 @@ async def subtree_async(self, depth: int | None = None) -> AssetList: """Returns the subtree of this asset up to a specified depth. Args: - depth (int | None): Retrieve assets up to this depth below the asset. + depth: Retrieve assets up to this depth below the asset. Returns: - AssetList: The requested assets sorted topologically. + The requested assets sorted topologically. """ if self.id is None: raise ValueError("Unable to fetch asset subtree: id is missing") @@ -237,9 +237,9 @@ async def time_series_async(self, **kwargs: Any) -> TimeSeriesList: """Retrieve all time series related to this asset. Args: - **kwargs (Any): All extra keyword arguments are passed to time_series/list. + **kwargs: All extra keyword arguments are passed to time_series/list. Returns: - TimeSeriesList: All time series related to this asset. + All time series related to this asset. """ asset_ids = self._prepare_asset_ids("time series", kwargs) return await self._cognite_client.time_series.list(asset_ids=asset_ids, **kwargs) @@ -252,9 +252,9 @@ async def sequences_async(self, **kwargs: Any) -> SequenceList: """Retrieve all sequences related to this asset. Args: - **kwargs (Any): All extra keyword arguments are passed to sequences/list. + **kwargs: All extra keyword arguments are passed to sequences/list. Returns: - SequenceList: All sequences related to this asset. + All sequences related to this asset. 
""" asset_ids = self._prepare_asset_ids("sequences", kwargs) return await self._cognite_client.sequences.list(asset_ids=asset_ids, **kwargs) @@ -267,9 +267,9 @@ async def events_async(self, **kwargs: Any) -> EventList: """Retrieve all events related to this asset. Args: - **kwargs (Any): All extra keyword arguments are passed to events/list. + **kwargs: All extra keyword arguments are passed to events/list. Returns: - EventList: All events related to this asset. + All events related to this asset. """ asset_ids = self._prepare_asset_ids("events", kwargs) return await self._cognite_client.events.list(asset_ids=asset_ids, **kwargs) @@ -282,9 +282,9 @@ async def files_async(self, **kwargs: Any) -> FileMetadataList: """Retrieve all files metadata related to this asset. Args: - **kwargs (Any): All extra keyword arguments are passed to files/list. + **kwargs: All extra keyword arguments are passed to files/list. Returns: - FileMetadataList: Metadata about all files related to this asset. + Metadata about all files related to this asset. """ asset_ids = self._prepare_asset_ids("files", kwargs) return await self._cognite_client.files.list(asset_ids=asset_ids, **kwargs) @@ -321,16 +321,16 @@ def to_pandas( # type: ignore [override] """Convert the instance into a pandas DataFrame. Args: - expand_metadata (bool): Expand the metadata into separate rows (default: False). - metadata_prefix (str): Prefix to use for the metadata rows, if expanded. - expand_aggregates (bool): Expand the aggregates into separate rows (default: False). - aggregates_prefix (str): Prefix to use for the aggregates rows, if expanded. - ignore (list[str] | None): List of row keys to skip when converting to a data frame. Is applied before expansions. - camel_case (bool): Convert attribute names to camel case (e.g. `externalId` instead of `external_id`). Does not affect custom data like metadata if expanded. 
- convert_timestamps (bool): Convert known attributes storing CDF timestamps (milliseconds since epoch) to datetime. Does not affect custom data like metadata. + expand_metadata: Expand the metadata into separate rows (default: False). + metadata_prefix: Prefix to use for the metadata rows, if expanded. + expand_aggregates: Expand the aggregates into separate rows (default: False). + aggregates_prefix: Prefix to use for the aggregates rows, if expanded. + ignore: List of row keys to skip when converting to a data frame. Is applied before expansions. + camel_case: Convert attribute names to camel case (e.g. `externalId` instead of `external_id`). Does not affect custom data like metadata if expanded. + convert_timestamps: Convert known attributes storing CDF timestamps (milliseconds since epoch) to datetime. Does not affect custom data like metadata. Returns: - pandas.DataFrame: The dataframe. + The dataframe. """ df = super().to_pandas( expand_metadata=expand_metadata, @@ -353,16 +353,16 @@ class AssetWrite(WriteableCogniteResource["AssetWrite"]): write version of the Asset class, and is used when inserting new assets. Args: - name (str): The name of the asset. - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - parent_id (int | None): The parent of the node, null if it is the root node. - parent_external_id (str | None): The external ID of the parent. The property is omitted if the asset doesn't have a parent or if the parent doesn't have externalId. - description (str | None): The description of the asset. - data_set_id (int | None): The id of the dataset this asset belongs to. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. - source (str | None): The source of the asset. 
- labels (list[Label | str | LabelDefinitionWrite | dict] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the asset. + name: The name of the asset. + external_id: The external ID provided by the client. Must be unique for the resource type. + parent_id: The parent of the node, null if it is the root node. + parent_external_id: The external ID of the parent. The property is omitted if the asset doesn't have a parent or if the parent doesn't have externalId. + description: The description of the asset. + data_set_id: The id of the dataset this asset belongs to. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. + source: The source of the asset. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the asset. """ def __init__( @@ -423,8 +423,8 @@ class AssetUpdate(CogniteUpdate): """Changes applied to asset Args: - id (int): A server-generated ID for the object. - external_id (str): The external ID provided by the client. Must be unique for the resource type. + id: A server-generated ID for the object. + external_id: The external ID provided by the client. Must be unique for the resource type. """ class _PrimitiveAssetUpdate(CognitePrimitiveUpdate): @@ -532,9 +532,9 @@ async def time_series_async(self, **kwargs: Any) -> TimeSeriesList: """Retrieve all time series related to these assets. Args: - **kwargs (Any): All extra keyword arguments are passed to time_series/list. Note: 'partitions' and 'limit' can not be used. + **kwargs: All extra keyword arguments are passed to time_series/list. Note: 'partitions' and 'limit' can not be used. Returns: - TimeSeriesList: All time series related to the assets in this AssetList. + All time series related to the assets in this AssetList. 
""" from cognite.client.data_classes import TimeSeriesList @@ -548,9 +548,9 @@ async def sequences_async(self, **kwargs: Any) -> SequenceList: """Retrieve all sequences related to these assets. Args: - **kwargs (Any): All extra keyword arguments are passed to sequences/list. Note: 'limit' can not be used. + **kwargs: All extra keyword arguments are passed to sequences/list. Note: 'limit' can not be used. Returns: - SequenceList: All sequences related to the assets in this AssetList. + All sequences related to the assets in this AssetList. """ from cognite.client.data_classes import SequenceList @@ -564,9 +564,9 @@ async def events_async(self, **kwargs: Any) -> EventList: """Retrieve all events related to these assets. Args: - **kwargs (Any): All extra keyword arguments are passed to events/list. Note: 'sort', 'partitions' and 'limit' can not be used. + **kwargs: All extra keyword arguments are passed to events/list. Note: 'sort', 'partitions' and 'limit' can not be used. Returns: - EventList: All events related to the assets in this AssetList. + All events related to the assets in this AssetList. """ from cognite.client.data_classes import EventList @@ -580,9 +580,9 @@ async def files_async(self, **kwargs: Any) -> FileMetadataList: """Retrieve all files metadata related to these assets. Args: - **kwargs (Any): All extra keyword arguments are passed to files/list. Note: 'limit' can not be used. + **kwargs: All extra keyword arguments are passed to files/list. Note: 'limit' can not be used. Returns: - FileMetadataList: Metadata about all files related to the assets in this AssetList. + Metadata about all files related to the assets in this AssetList. """ from cognite.client.data_classes import FileMetadataList @@ -623,19 +623,19 @@ class AssetFilter(CogniteFilter): """Filter on assets with strict matching. Args: - name (str | None): The name of the asset. - parent_ids (Sequence[int] | None): Return only the direct descendants of the specified assets. 
- parent_external_ids (SequenceNotStr[str] | None): Return only the direct descendants of the specified assets. - asset_subtree_ids (Sequence[dict[str, Any]] | None): Only include assets in subtrees rooted at the specified asset IDs and external IDs. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (Sequence[dict[str, Any]] | None): No description. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. - source (str | None): The source of the asset. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - root (bool | None): Whether the filtered assets are root assets, or not. Set to True to only list root assets. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - labels (LabelFilter | None): Return only the resource matching the specified label constraints. - geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. + name: The name of the asset. + parent_ids: Return only the direct descendants of the specified assets. + parent_external_ids: Return only the direct descendants of the specified assets. + asset_subtree_ids: Only include assets in subtrees rooted at the specified asset IDs and external IDs. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: No description. + metadata: Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. + source: The source of the asset. + created_time: Range between two timestamps. 
+ last_updated_time: Range between two timestamps. + root: Whether the filtered assets are root assets, or not. Set to True to only list root assets. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + labels: Return only the resource matching the specified label constraints. + geo_location: Only include files matching the specified geographic relation. """ def __init__( @@ -691,8 +691,8 @@ class AssetHierarchy: any asset providing a parent link by ID instead of external ID, are assumed valid. Args: - assets (Sequence[AssetWrite]): Sequence of assets to be inspected for validity. - ignore_orphans (bool): If true, orphan assets are assumed valid and won't raise. + assets: Sequence of assets to be inspected for validity. + ignore_orphans: If true, orphan assets are assumed valid and won't raise. Examples: @@ -831,7 +831,7 @@ def groupby_parent_xid(self) -> dict[str | None, list[AssetWrite]]: The same is true for all assets linking its parent by ID. Returns: - dict[str | None, list[AssetWrite]]: No description.""" + No description.""" self.is_valid(on_error="raise") # Sort (on parent) as required by groupby. This is tricky as we need to avoid comparing string with None, @@ -866,10 +866,10 @@ def count_subtree(self, mapping: dict[str | None, list[AssetWrite]]) -> dict[str """Returns a mapping from asset external ID to the size of its subtree (children and children of children etc.). Args: - mapping (dict[str | None, list[AssetWrite]]): The mapping returned by `groupby_parent_xid()`. If None is passed, will be recreated (slightly expensive). + mapping: The mapping returned by `groupby_parent_xid()`. If None is passed, will be recreated (slightly expensive). Returns: - dict[str, int]: Lookup from external ID to descendant count. + Lookup from external ID to descendant count. 
""" if mapping is None: mapping = self.groupby_parent_xid() diff --git a/cognite/client/data_classes/contextualization.py b/cognite/client/data_classes/contextualization.py index 2c73ebe33d..0f2d1b2a83 100644 --- a/cognite/client/data_classes/contextualization.py +++ b/cognite/client/data_classes/contextualization.py @@ -123,8 +123,8 @@ async def wait_for_completion_async(self, timeout: float | None = None, interval """Waits for job completion. This is generally not needed to call directly, as `.result` will do so automatically. Args: - timeout (float | None): Time out after this many seconds. (None means wait indefinitely) - interval (float): Influence how often to poll status (seconds). + timeout: Time out after this many seconds. (None means wait indefinitely) + interval: Influence how often to poll status (seconds). Raises: CogniteModelFailedError: The model fit failed. @@ -255,8 +255,8 @@ async def wait_for_completion_async(self, timeout: int | None = None, interval: """Waits for model completion. This is generally not needed to call directly, as `.result` will do so automatically. Args: - timeout (int | None): Time out after this many seconds. (None means wait indefinitely) - interval (int): Influence how often to poll status (seconds). + timeout: Time out after this many seconds. (None means wait indefinitely) + interval: Influence how often to poll status (seconds). Raises: CogniteModelFailedError: The model fit failed. @@ -290,13 +290,13 @@ async def predict_async( Blocks and waits for the model to be ready if it has been recently created. Args: - sources (list[dict] | None): entities to match from, does not need an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). If omitted, will use data from fit. - targets (list[dict] | None): entities to match to, does not need an 'id' field. Tolerant to passing more than is needed or used. If omitted, will use data from fit. 
- num_matches (int): number of matches to return for each item. - score_threshold (float | None): only return matches with a score above this threshold + sources: entities to match from, does not need an 'id' field. Tolerant to passing more than is needed or used (e.g. json dump of time series list). If omitted, will use data from fit. + targets: entities to match to, does not need an 'id' field. Tolerant to passing more than is needed or used. If omitted, will use data from fit. + num_matches: number of matches to return for each item. + score_threshold: only return matches with a score above this threshold Returns: - EntityMatchingPredictionResult: object which can be used to wait for and retrieve results.""" + object which can be used to wait for and retrieve results.""" await self.wait_for_completion_async() json = { "id": self.id, @@ -332,9 +332,9 @@ async def refit_async(self, true_matches: Sequence[dict | tuple[int | str, int | """Re-fits an entity matching model, using the combination of the old and new true matches. Args: - true_matches (Sequence[dict | tuple[int | str, int | str]]): Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'). A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. + true_matches: Updated known valid matches given as a list of dicts with keys 'fromId', 'fromExternalId', 'toId', 'toExternalId'. A tuple can be used instead of the dictionary for convenience, interpreted as id/externalId based on type. 
Returns: - EntityMatchingModel: new model refitted to true_matches.""" + new model refitted to true_matches.""" true_matches = [convert_true_match(true_match) for true_match in true_matches] await self.wait_for_completion_async() semaphore = self._cognite_client.entity_matching._get_semaphore("write") @@ -367,8 +367,8 @@ class EntityMatchingModelUpdate(CogniteUpdate): """Changes applied to entity matching model Args: - id (int): A server-generated ID for the object. - external_id (str): The external ID provided by the client. Must be unique for the resource type. + id: A server-generated ID for the object. + external_id: The external ID provided by the client. Must be unique for the resource type. """ class _PrimitiveUpdate(CognitePrimitiveUpdate): @@ -483,10 +483,10 @@ def to_pandas(self, camel_case: bool = False) -> pandas.DataFrame: # type: igno """Convert the instance into a pandas DataFrame. Args: - camel_case (bool): Convert column names to camel case (e.g. `externalId` instead of `external_id`) + camel_case: Convert column names to camel case (e.g. `externalId` instead of `external_id`) Returns: - pandas.DataFrame: The dataframe. + The dataframe. """ df = super().to_pandas(camel_case=camel_case) df.loc["results"] = f"{len(df['results'])} pages" @@ -607,10 +607,10 @@ def to_pandas(self, camel_case: bool = False) -> pandas.DataFrame: # type: igno """Convert the instance into a pandas DataFrame. Args: - camel_case (bool): Convert column names to camel case (e.g. `externalId` instead of `external_id`) + camel_case: Convert column names to camel case (e.g. `externalId` instead of `external_id`) Returns: - pandas.DataFrame: The dataframe. + The dataframe. """ df = super().to_pandas(camel_case=camel_case) df.loc["annotations"] = f"{len(self.annotations or [])} annotations" @@ -965,7 +965,7 @@ async def wait_for_completion_async(self, timeout: int | None = None) -> None: """Waits for all jobs to complete, generally not needed to call as it is called by result. 
Args: - timeout (int | None): Time out after this many seconds. (None means wait indefinitely) + timeout: Time out after this many seconds. (None means wait indefinitely) """ start = time.time() self._remaining_job_ids = self.job_ids @@ -1234,11 +1234,11 @@ async def save_predictions_async( See https://docs.cognite.com/api/v1/#operation/annotationsSuggest Args: - creating_user (str): (str, optional): A username, or email, or name. - creating_app (str | None): The name of the app from which this annotation was created. Defaults to 'cognite-sdk-python'. - creating_app_version (str | None): The version of the app that created this annotation. Must be a valid semantic versioning (SemVer) string. Defaults to client version. + creating_user: A username, or email, or name. + creating_app: The name of the app from which this annotation was created. Defaults to 'cognite-sdk-python'. + creating_app_version: The version of the app that created this annotation. Must be a valid semantic versioning (SemVer) string. Defaults to client version. Returns: - Annotation | AnnotationList: (suggested) annotation(s) stored in CDF. + (suggested) annotation(s) stored in CDF. """ if JobStatus(self.status) is JobStatus.COMPLETED: @@ -1346,10 +1346,10 @@ class DirectionWeights(CogniteResource): larger distance is allowed. Args: - left (float | None): Weight for the connection towards text boxes to the left. - right (float | None): Weight for the connection towards text boxes to the right. - up (float | None): Weight for the connection towards text boxes above. - down (float | None): Weight for the connection towards text boxes below. + left: Weight for the connection towards text boxes to the left. + right: Weight for the connection towards text boxes to the right. + up: Weight for the connection towards text boxes above. + down: Weight for the connection towards text boxes below. 
""" def __init__( @@ -1378,9 +1378,9 @@ class CustomizeFuzziness(CogniteResource): """Additional requirements for the fuzzy matching algorithm. The fuzzy match is allowed if any of these are true for each match candidate. The overall minFuzzyScore still applies, but a stricter fuzzyScore can be set here, which would not be enforced if either the minChars or maxBoxes conditions are met, making it possible to exclude detections using replacements if they are either short, or combined from many boxes. Args: - fuzzy_score (float | None): The minimum fuzzy score of the candidate match. - max_boxes (int | None): Maximum number of text boxes the potential match is composed of. - min_chars (int | None): The minimum number of characters that must be present in the candidate match string. + fuzzy_score: The minimum fuzzy score of the candidate match. + max_boxes: Maximum number of text boxes the potential match is composed of. + min_chars: The minimum number of characters that must be present in the candidate match string. """ def __init__( @@ -1406,9 +1406,9 @@ class ConnectionFlags: """Connection flags for token graph. These are passed as an array of strings to the API. Only flags set to True are included in the array. There is no need to set any flags to False. Args: - natural_reading_order (bool): Only connect text regions that are in natural reading order (i.e. top to bottom and left to right). - no_text_inbetween (bool): Only connect text regions that are not separated by other text regions. - **flags (bool): Other flags. + natural_reading_order: Only connect text regions that are in natural reading order (i.e. top to bottom and left to right). + no_text_inbetween: Only connect text regions that are not separated by other text regions. + **flags: Other flags. """ def __init__( @@ -1441,17 +1441,17 @@ class DiagramDetectConfig(CogniteResource): """`Configuration options for the diagrams/detect endpoint `_. 
Args: - annotation_extract (bool | None): Read SHX text embedded in the diagram file. If present, this text will override overlapping OCR text. Cannot be used at the same time as read_embedded_text. - case_sensitive (bool | None): Case sensitive text matching. Defaults to True. - connection_flags (ConnectionFlags | list[str] | None): Connection flags for token graph. Two flags are supported thus far: `no_text_inbetween` and `natural_reading_order`. - customize_fuzziness (CustomizeFuzziness | dict[str, Any] | None): Additional requirements for the fuzzy matching algorithm. The fuzzy match is allowed if any of these are true for each match candidate. The overall minFuzzyScore still applies, but a stricter fuzzyScore can be set here, which would not be enforced if either the minChars or maxBoxes conditions are met, making it possible to exclude detections using replacements if they are either short, or combined from many boxes. - direction_delta (float | None): Maximum angle between the direction of two text boxes for them to be connected. Directions are currently multiples of 90 degrees. - direction_weights (DirectionWeights | dict[str, Any] | None): Direction weights that control how far subsequent ocr text boxes can be from another in a particular direction and still be combined into the same detection. Lower value means larger distance is allowed. The direction is relative to the text orientation. - min_fuzzy_score (float | None): For each detection, this controls to which degree characters can be replaced from the OCR text with similar characters, e.g. I and 1. A value of 1 will disable character replacements entirely. - read_embedded_text (bool | None): Read text embedded in the PDF file. If present, this text will override overlapping OCR text. - remove_leading_zeros (bool | None): Disregard leading zeroes when matching tags (e.g. 
"A0001" will match "A1") - substitutions (dict[str, list[str]] | None): Override the default mapping of characters to an array of allowed substitute characters. The default mapping contains characters commonly confused by OCR. Provide your custom mapping in the format like so: {"0": ["O", "Q"], "1": ["l", "I"]}. This means: 0 (zero) is allowed to be replaced by uppercase letter O or Q, and 1 (one) is allowed to be replaced by lowercase letter l or uppercase letter I. No other replacements are allowed. - **params (Any): Other parameters. The parameter name will be converted to camel case but the value will be passed as is. + annotation_extract: Read SHX text embedded in the diagram file. If present, this text will override overlapping OCR text. Cannot be used at the same time as read_embedded_text. + case_sensitive: Case sensitive text matching. Defaults to True. + connection_flags: Connection flags for token graph. Two flags are supported thus far: `no_text_inbetween` and `natural_reading_order`. + customize_fuzziness: Additional requirements for the fuzzy matching algorithm. The fuzzy match is allowed if any of these are true for each match candidate. The overall minFuzzyScore still applies, but a stricter fuzzyScore can be set here, which would not be enforced if either the minChars or maxBoxes conditions are met, making it possible to exclude detections using replacements if they are either short, or combined from many boxes. + direction_delta: Maximum angle between the direction of two text boxes for them to be connected. Directions are currently multiples of 90 degrees. + direction_weights: Direction weights that control how far subsequent ocr text boxes can be from another in a particular direction and still be combined into the same detection. Lower value means larger distance is allowed. The direction is relative to the text orientation. 
+ min_fuzzy_score: For each detection, this controls to which degree characters can be replaced from the OCR text with similar characters, e.g. I and 1. A value of 1 will disable character replacements entirely. + read_embedded_text: Read text embedded in the PDF file. If present, this text will override overlapping OCR text. + remove_leading_zeros: Disregard leading zeroes when matching tags (e.g. "A0001" will match "A1") + substitutions: Override the default mapping of characters to an array of allowed substitute characters. The default mapping contains characters commonly confused by OCR. Provide your custom mapping in the format like so: {"0": ["O", "Q"], "1": ["l", "I"]}. This means: 0 (zero) is allowed to be replaced by uppercase letter O or Q, and 1 (one) is allowed to be replaced by lowercase letter l or uppercase letter I. No other replacements are allowed. + **params: Other parameters. The parameter name will be converted to camel case but the value will be passed as is. Example: diff --git a/cognite/client/data_classes/data_modeling/cdm/v1.py b/cognite/client/data_classes/data_modeling/cdm/v1.py index 1440b21408..542b3a2488 100644 --- a/cognite/client/data_classes/data_modeling/cdm/v1.py +++ b/cognite/client/data_classes/data_modeling/cdm/v1.py @@ -40,28 +40,28 @@ class Cognite360ImageApply(_Cognite360ImageProperties, TypedNodeApply): It is used when data is written to CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 360 image. 
- translation_x (float | None | Omitted): The displacement of the object along the X-axis in the 3D coordinate system - translation_y (float | None | Omitted): The displacement of the object along the Y-axis in the 3D coordinate system - translation_z (float | None | Omitted): The displacement of the object along the Z-axis in the 3D coordinate system - euler_rotation_x (float | None | Omitted): The rotation of the object around the X-axis in radians - euler_rotation_y (float | None | Omitted): The rotation of the object around the Y-axis in radians - euler_rotation_z (float | None | Omitted): The rotation of the object around the Z-axis in radians - scale_x (float | None | Omitted): The scaling factor applied to the object along the X-axis - scale_y (float | None | Omitted): The scaling factor applied to the object along the Y-axis - scale_z (float | None | Omitted): The scaling factor applied to the object along the Z-axis - front (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the front projection of the cube map - back (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the back projection of the cube map - left (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the left projection of the cube map - right (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the right projection of the cube map - top (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the top projection of the cube map - bottom (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the bottom projection of the cube map - collection_360 (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to Cognite360ImageCollection - station_360 (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to 
Cognite3DGroup instance that groups different Cognite360Image instances to the same station - taken_at (datetime | None | Omitted): The timestamp when the 6 photos were taken - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite 360 image. + translation_x: The displacement of the object along the X-axis in the 3D coordinate system + translation_y: The displacement of the object along the Y-axis in the 3D coordinate system + translation_z: The displacement of the object along the Z-axis in the 3D coordinate system + euler_rotation_x: The rotation of the object around the X-axis in radians + euler_rotation_y: The rotation of the object around the Y-axis in radians + euler_rotation_z: The rotation of the object around the Z-axis in radians + scale_x: The scaling factor applied to the object along the X-axis + scale_y: The scaling factor applied to the object along the Y-axis + scale_z: The scaling factor applied to the object along the Z-axis + front: Direct relation to a file holding the front projection of the cube map + back: Direct relation to a file holding the back projection of the cube map + left: Direct relation to a file holding the left projection of the cube map + right: Direct relation to a file holding the right projection of the cube map + top: Direct relation to a file holding the top projection of 
the cube map + bottom: Direct relation to a file holding the bottom projection of the cube map + collection_360: Direct relation to Cognite360ImageCollection + station_360: Direct relation to Cognite3DGroup instance that groups different Cognite360Image instances to the same station + taken_at: The timestamp when the 6 photos were taken + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -117,31 +117,31 @@ class Cognite360Image(_Cognite360ImageProperties, TypedNode): It is used when data is read from CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 360 image. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- translation_x (float | None): The displacement of the object along the X-axis in the 3D coordinate system - translation_y (float | None): The displacement of the object along the Y-axis in the 3D coordinate system - translation_z (float | None): The displacement of the object along the Z-axis in the 3D coordinate system - euler_rotation_x (float | None): The rotation of the object around the X-axis in radians - euler_rotation_y (float | None): The rotation of the object around the Y-axis in radians - euler_rotation_z (float | None): The rotation of the object around the Z-axis in radians - scale_x (float | None): The scaling factor applied to the object along the X-axis - scale_y (float | None): The scaling factor applied to the object along the Y-axis - scale_z (float | None): The scaling factor applied to the object along the Z-axis - front (DirectRelationReference | None): Direct relation to a file holding the front projection of the cube map - back (DirectRelationReference | None): Direct relation to a file holding the back projection of the cube map - left (DirectRelationReference | None): Direct relation to a file holding the left projection of the cube map - right (DirectRelationReference | None): Direct relation to a file holding the right projection of the cube map - top (DirectRelationReference | None): Direct relation to a file holding the top projection of the cube map - bottom (DirectRelationReference | None): Direct relation to a file holding the bottom projection of the cube map - collection_360 (DirectRelationReference | None): Direct relation to Cognite360ImageCollection - station_360 (DirectRelationReference | None): Direct relation to Cognite3DGroup instance that groups different Cognite360Image instances to the same station - taken_at (datetime | None): The timestamp when the 6 photos were taken - type (DirectRelationReference | None): Direct relation pointing to the type node. 
- deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite 360 image. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + translation_x: The displacement of the object along the X-axis in the 3D coordinate system + translation_y: The displacement of the object along the Y-axis in the 3D coordinate system + translation_z: The displacement of the object along the Z-axis in the 3D coordinate system + euler_rotation_x: The rotation of the object around the X-axis in radians + euler_rotation_y: The rotation of the object around the Y-axis in radians + euler_rotation_z: The rotation of the object around the Z-axis in radians + scale_x: The scaling factor applied to the object along the X-axis + scale_y: The scaling factor applied to the object along the Y-axis + scale_z: The scaling factor applied to the object along the Z-axis + front: Direct relation to a file holding the front projection of the cube map + back: Direct relation to a file holding the back projection of the cube map + left: Direct relation to a file holding the left projection of the cube map + right: Direct relation to a file holding the right projection of the cube map + top: Direct relation to a file holding the top projection of the cube map + bottom: Direct relation to a file holding the bottom projection of the cube map + collection_360: Direct relation to Cognite360ImageCollection + station_360: Direct relation to Cognite3DGroup 
instance that groups different Cognite360Image instances to the same station + taken_at: The timestamp when the 6 photos were taken + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -237,18 +237,18 @@ class Cognite360ImageCollectionApply(_Cognite360ImageCollectionProperties, Typed Represents a logical collection of Cognite360Image instances Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 360 image collection. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - status (Literal['Done', 'Failed', 'Processing', 'Queued'] | None | Omitted): The status field. - published (bool | None | Omitted): The published field. - revision_type (Literal['CAD', 'Image360', 'PointCloud'] | None | Omitted): The revision type field. - model_3d (DirectRelationReference | tuple[str, str] | None | Omitted): The model 3d field. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. 
- type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite 360 image collection. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + status: The status field. + published: The published field. + revision_type: The revision type field. + model_3d: The model 3d field. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -286,21 +286,21 @@ class Cognite360ImageCollection(_Cognite360ImageCollectionProperties, TypedNode) Represents a logical collection of Cognite360Image instances Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 360 image collection. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - status (Literal['Done', 'Failed', 'Processing', 'Queued'] | None): The status field. - published (bool | None): The published field. - revision_type (Literal['CAD', 'Image360', 'PointCloud'] | None): The revision type field. - model_3d (DirectRelationReference | None): The model 3d field. - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite 360 image collection. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + status: The status field. + published: The published field. + revision_type: The revision type field. + model_3d: The model 3d field. + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. 
Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -365,16 +365,16 @@ class Cognite360ImageModelApply(_Cognite360ImageModelProperties, TypedNodeApply) Navigational aid for traversing Cognite360ImageModel instances Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 360 image model. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - model_type (Literal['CAD', 'Image360', 'PointCloud'] | None | Omitted): CAD, PointCloud or Image360 - thumbnail (DirectRelationReference | tuple[str, str] | None | Omitted): Thumbnail of the 3D model - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite 360 image model. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + model_type: CAD, PointCloud or Image360 + thumbnail: Thumbnail of the 3D model + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. 
If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -408,19 +408,19 @@ class Cognite360ImageModel(_Cognite360ImageModelProperties, TypedNode): Navigational aid for traversing Cognite360ImageModel instances Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 360 image model. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - model_type (Literal['CAD', 'Image360', 'PointCloud'] | None): CAD, PointCloud or Image360 - thumbnail (DirectRelationReference | None): Thumbnail of the 3D model - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite 360 image model. + version: DMS version. 
+ last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + model_type: CAD, PointCloud or Image360 + thumbnail: Thumbnail of the 3D model + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -479,15 +479,15 @@ class Cognite360ImageStationApply(_Cognite360ImageStationProperties, TypedNodeAp A way to group images across collections. Used for creating visual scan history Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 360 image station. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - group_type (Literal['Station360'] | None | Omitted): Type of group - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. 
If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite 360 image station. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + group_type: Type of group + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -519,18 +519,18 @@ class Cognite360ImageStation(_Cognite360ImageStationProperties, TypedNode): A way to group images across collections. Used for creating visual scan history Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 360 image station. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - group_type (Literal['Station360'] | None): Type of group - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite 360 image station. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + group_type: Type of group + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -586,16 +586,16 @@ class Cognite3DModelApply(_Cognite3DModelProperties, TypedNodeApply): Groups revisions of 3D data of various kinds together (CAD, PointCloud, Image360) Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 3D model. 
- name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - model_type (Literal['CAD', 'Image360', 'PointCloud'] | None | Omitted): CAD, PointCloud or Image360 - thumbnail (DirectRelationReference | tuple[str, str] | None | Omitted): Thumbnail of the 3D model - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite 3D model. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + model_type: CAD, PointCloud or Image360 + thumbnail: Thumbnail of the 3D model + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. 
+ type: Direct relation pointing to the type node. """ def __init__( @@ -629,19 +629,19 @@ class Cognite3DModel(_Cognite3DModelProperties, TypedNode): Groups revisions of 3D data of various kinds together (CAD, PointCloud, Image360) Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 3D model. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - model_type (Literal['CAD', 'Image360', 'PointCloud'] | None): CAD, PointCloud or Image360 - thumbnail (DirectRelationReference | None): Thumbnail of the 3D model - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite 3D model. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
+ name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + model_type: CAD, PointCloud or Image360 + thumbnail: Thumbnail of the 3D model + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -705,20 +705,20 @@ class Cognite3DObjectApply(_Cognite3DObjectProperties, TypedNodeApply): This is the virtual position representation of an object in the physical world, connecting an asset to one or more 3D resources Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 3D object. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - x_min (float | None | Omitted): Lowest X value in bounding box - x_max (float | None | Omitted): Highest X value in bounding box - y_min (float | None | Omitted): Lowest Y value in bounding box - y_max (float | None | Omitted): Highest Y value in bounding box - z_min (float | None | Omitted): Lowest Z value in bounding box - z_max (float | None | Omitted): Highest Z value in bounding box - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. 
If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite 3D object. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + x_min: Lowest X value in bounding box + x_max: Highest X value in bounding box + y_min: Lowest Y value in bounding box + y_max: Highest Y value in bounding box + z_min: Lowest Z value in bounding box + z_max: Highest Z value in bounding box + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -760,23 +760,23 @@ class Cognite3DObject(_Cognite3DObjectProperties, TypedNode): This is the virtual position representation of an object in the physical world, connecting an asset to one or more 3D resources Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 3D object. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - x_min (float | None): Lowest X value in bounding box - x_max (float | None): Highest X value in bounding box - y_min (float | None): Lowest Y value in bounding box - y_max (float | None): Highest Y value in bounding box - z_min (float | None): Lowest Z value in bounding box - z_max (float | None): Highest Z value in bounding box - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite 3D object. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + x_min: Lowest X value in bounding box + x_max: Highest X value in bounding box + y_min: Lowest Y value in bounding box + y_max: Highest Y value in bounding box + z_min: Lowest Z value in bounding box + z_max: Highest Z value in bounding box + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -848,14 +848,14 @@ class Cognite3DRevisionApply(_Cognite3DRevisionProperties, TypedNodeApply): Shared revision information for various 3D data types. Normally not used directly, but through CognitePointCloudRevision, Image360Collection or CogniteCADRevision Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 3D revision. - status (Literal['Done', 'Failed', 'Processing', 'Queued'] | None | Omitted): The status field. - published (bool | None | Omitted): The published field. - revision_type (Literal['CAD', 'Image360', 'PointCloud'] | None | Omitted): The revision type field. - model_3d (DirectRelationReference | tuple[str, str] | None | Omitted): The model 3d field. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite 3D revision. + status: The status field. + published: The published field. + revision_type: The revision type field. + model_3d: The model 3d field. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. 
If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -885,17 +885,17 @@ class Cognite3DRevision(_Cognite3DRevisionProperties, TypedNode): Shared revision information for various 3D data types. Normally not used directly, but through CognitePointCloudRevision, Image360Collection or CogniteCADRevision Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 3D revision. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - status (Literal['Done', 'Failed', 'Processing', 'Queued'] | None): The status field. - published (bool | None): The published field. - revision_type (Literal['CAD', 'Image360', 'PointCloud'] | None): The revision type field. - model_3d (DirectRelationReference | None): The model 3d field. - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite 3D revision. + version: DMS version. 
+ last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + status: The status field. + published: The published field. + revision_type: The revision type field. + model_3d: The model 3d field. + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -956,19 +956,19 @@ class Cognite3DTransformationNodeApply(_Cognite3DTransformationProperties, Typed The Cognite3DTransformation object defines a comprehensive 3D transformation, enabling precise adjustments to an object's position, orientation, and size in the 3D coordinate system. It allows for the translation of objects along the three spatial axes, rotation around these axes using Euler angles, and scaling along each axis to modify the object's dimensions. The object's transformation is defined in "CDF space", a coordinate system where the positive Z axis is the up direction Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 3D transformation node. 
- translation_x (float | None | Omitted): The displacement of the object along the X-axis in the 3D coordinate system - translation_y (float | None | Omitted): The displacement of the object along the Y-axis in the 3D coordinate system - translation_z (float | None | Omitted): The displacement of the object along the Z-axis in the 3D coordinate system - euler_rotation_x (float | None | Omitted): The rotation of the object around the X-axis in radians - euler_rotation_y (float | None | Omitted): The rotation of the object around the Y-axis in radians - euler_rotation_z (float | None | Omitted): The rotation of the object around the Z-axis in radians - scale_x (float | None | Omitted): The scaling factor applied to the object along the X-axis - scale_y (float | None | Omitted): The scaling factor applied to the object along the Y-axis - scale_z (float | None | Omitted): The scaling factor applied to the object along the Z-axis - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite 3D transformation node. 
+ translation_x: The displacement of the object along the X-axis in the 3D coordinate system + translation_y: The displacement of the object along the Y-axis in the 3D coordinate system + translation_z: The displacement of the object along the Z-axis in the 3D coordinate system + euler_rotation_x: The rotation of the object around the X-axis in radians + euler_rotation_y: The rotation of the object around the Y-axis in radians + euler_rotation_z: The rotation of the object around the Z-axis in radians + scale_x: The scaling factor applied to the object along the X-axis + scale_y: The scaling factor applied to the object along the Y-axis + scale_z: The scaling factor applied to the object along the Z-axis + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -1008,22 +1008,22 @@ class Cognite3DTransformationNode(_Cognite3DTransformationProperties, TypedNode) The Cognite3DTransformation object defines a comprehensive 3D transformation, enabling precise adjustments to an object's position, orientation, and size in the 3D coordinate system. It allows for the translation of objects along the three spatial axes, rotation around these axes using Euler angles, and scaling along each axis to modify the object's dimensions. The object's transformation is defined in "CDF space", a coordinate system where the positive Z axis is the up direction Args: - space (str): The space where the node is located. 
- external_id (str): The external id of the Cognite 3D transformation node. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - translation_x (float | None): The displacement of the object along the X-axis in the 3D coordinate system - translation_y (float | None): The displacement of the object along the Y-axis in the 3D coordinate system - translation_z (float | None): The displacement of the object along the Z-axis in the 3D coordinate system - euler_rotation_x (float | None): The rotation of the object around the X-axis in radians - euler_rotation_y (float | None): The rotation of the object around the Y-axis in radians - euler_rotation_z (float | None): The rotation of the object around the Z-axis in radians - scale_x (float | None): The scaling factor applied to the object along the X-axis - scale_y (float | None): The scaling factor applied to the object along the Y-axis - scale_z (float | None): The scaling factor applied to the object along the Z-axis - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite 3D transformation node. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
+ created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + translation_x: The displacement of the object along the X-axis in the 3D coordinate system + translation_y: The displacement of the object along the Y-axis in the 3D coordinate system + translation_z: The displacement of the object along the Z-axis in the 3D coordinate system + euler_rotation_x: The rotation of the object around the X-axis in radians + euler_rotation_y: The rotation of the object around the Y-axis in radians + euler_rotation_z: The rotation of the object around the Z-axis in radians + scale_x: The scaling factor applied to the object along the X-axis + scale_y: The scaling factor applied to the object along the Y-axis + scale_z: The scaling factor applied to the object along the Z-axis + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -1101,28 +1101,28 @@ class CogniteActivityApply(_CogniteActivityProperties, TypedNodeApply): Represents activities. Activities typically happen over a period and have a start and end time. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite activity. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. 
For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - start_time (datetime | None | Omitted): The actual start time of an activity (or similar that extends this) - end_time (datetime | None | Omitted): The actual end time of an activity (or similar that extends this) - scheduled_start_time (datetime | None | Omitted): The planned start time of an activity (or similar that extends this) - scheduled_end_time (datetime | None | Omitted): The planned end time of an activity (or similar that extends this) - assets (list[DirectRelationReference | tuple[str, str]] | None | Omitted): A list of assets the activity is related to. - equipment (list[DirectRelationReference | tuple[str, str]] | None | Omitted): A list of equipment the activity is related to. - time_series (list[DirectRelationReference | tuple[str, str]] | None | Omitted): A list of time series the activity is related to. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). 
If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite activity. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + start_time: The actual start time of an activity (or similar that extends this) + end_time: The actual end time of an activity (or similar that extends this) + scheduled_start_time: The planned start time of an activity (or similar that extends this) + scheduled_end_time: The planned end time of an activity (or similar that extends this) + assets: A list of assets the activity is related to. + equipment: A list of equipment the activity is related to. + time_series: A list of time series the activity is related to. 
+ existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -1180,31 +1180,31 @@ class CogniteActivity(_CogniteActivityProperties, TypedNode): Represents activities. Activities typically happen over a period and have a start and end time. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite activity. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
- source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - start_time (datetime | None): The actual start time of an activity (or similar that extends this) - end_time (datetime | None): The actual end time of an activity (or similar that extends this) - scheduled_start_time (datetime | None): The planned start time of an activity (or similar that extends this) - scheduled_end_time (datetime | None): The planned end time of an activity (or similar that extends this) - assets (list[DirectRelationReference] | None): A list of assets the activity is related to. - equipment (list[DirectRelationReference] | None): A list of equipment the activity is related to. - time_series (list[DirectRelationReference] | None): A list of time series the activity is related to. - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite activity. + version: DMS version. 
+ last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + start_time: The actual start time of an activity (or similar that extends this) + end_time: The actual end time of an activity (or similar that extends this) + scheduled_start_time: The planned start time of an activity (or similar that extends this) + scheduled_end_time: The planned end time of an activity (or similar that extends this) + assets: A list of assets the activity is related to. + equipment: A list of equipment the activity is related to. + time_series: A list of time series the activity is related to. + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. 
Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -1307,25 +1307,25 @@ class CogniteAssetApply(_CogniteAssetProperties, TypedNodeApply): Assets represent systems that support industrial functions or processes. Assets are often called 'functional location'. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite asset. - object_3d (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to an Object3D instance representing the 3D resource - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - parent (DirectRelationReference | tuple[str, str] | None | Omitted): The parent of the asset. 
- asset_class (DirectRelationReference | tuple[str, str] | None | Omitted): Specifies the class of the asset. It's a direct relation to CogniteAssetClass. - asset_type (DirectRelationReference | tuple[str, str] | None | Omitted): Specifies the type of the asset. It's a direct relation to CogniteAssetType. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite asset. + object_3d: Direct relation to an Object3D instance representing the 3D resource + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. 
This identifier is not guaranteed to match the user identifiers in CDF + parent: The parent of the asset. + asset_class: Specifies the class of the asset. It's a direct relation to CogniteAssetClass. + asset_type: Specifies the type of the asset. It's a direct relation to CogniteAssetType. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -1377,31 +1377,31 @@ class CogniteAsset(_CogniteAssetProperties, TypedNode): Assets represent systems that support industrial functions or processes. Assets are often called 'functional location'. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite asset. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - object_3d (DirectRelationReference | None): Direct relation to an Object3D instance representing the 3D resource - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. 
For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - parent (DirectRelationReference | None): The parent of the asset. - root (DirectRelationReference | None): An automatically updated reference to the top-level asset of the hierarchy. - path (list[DirectRelationReference] | None): An automatically updated ordered list of this asset's ancestors, starting with the root asset. Enables subtree filtering to find all assets under a parent. - path_last_updated_time (datetime | None): The last time the path was updated for this asset. - asset_class (DirectRelationReference | None): Specifies the class of the asset. It's a direct relation to CogniteAssetClass. - asset_type (DirectRelationReference | None): Specifies the type of the asset. It's a direct relation to CogniteAssetType. - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite asset. + version: DMS version. 
+ last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + object_3d: Direct relation to an Object3D instance representing the 3D resource + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + parent: The parent of the asset. + root: An automatically updated reference to the top-level asset of the hierarchy. + path: An automatically updated ordered list of this asset's ancestors, starting with the root asset. Enables subtree filtering to find all assets under a parent. + path_last_updated_time: The last time the path was updated for this asset. + asset_class: Specifies the class of the asset. It's a direct relation to CogniteAssetClass. + asset_type: Specifies the type of the asset. It's a direct relation to CogniteAssetType. + type: Direct relation pointing to the type node. 
+ deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ path_last_updated_time = PropertyOptions("pathLastUpdatedTime") @@ -1493,16 +1493,16 @@ class CogniteAssetClassApply(_CogniteAssetClassProperties, TypedNodeApply): Represents the class of an asset. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite asset clas. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - code (str | None | Omitted): A unique identifier for the class of asset. - standard (str | None | Omitted): A text string to specify which standard the class is from. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite asset class. 
+ name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + code: A unique identifier for the class of asset. + standard: A text string to specify which standard the class is from. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -1536,19 +1536,19 @@ class CogniteAssetClass(_CogniteAssetClassProperties, TypedNode): Represents the class of an asset. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite asset clas. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - code (str | None): A unique identifier for the class of asset. - standard (str | None): A text string to specify which standard the class is from. - type (DirectRelationReference | None): Direct relation pointing to the type node. 
- deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite asset class. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + code: A unique identifier for the class of asset. + standard: A text string to specify which standard the class is from. + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -1607,17 +1607,17 @@ class CogniteAssetTypeApply(_CogniteAssetTypeProperties, TypedNodeApply): Represents the type of an asset. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite asset type. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - code (str | None | Omitted): A unique identifier for the type of asset. 
- standard (str | None | Omitted): A text string to specify which standard the type is from. - asset_class (DirectRelationReference | tuple[str, str] | None | Omitted): Specifies the class the type belongs to. It's a direct relation to CogniteAssetClass. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite asset type. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + code: A unique identifier for the type of asset. + standard: A text string to specify which standard the type is from. + asset_class: Specifies the class the type belongs to. It's a direct relation to CogniteAssetClass. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. 
""" def __init__( @@ -1653,20 +1653,20 @@ class CogniteAssetType(_CogniteAssetTypeProperties, TypedNode): Represents the type of an asset. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite asset type. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - code (str | None): A unique identifier for the type of asset. - standard (str | None): A text string to specify which standard the type is from. - asset_class (DirectRelationReference | None): Specifies the class the type belongs to. It's a direct relation to CogniteAssetClass. - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite asset type. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
+ name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + code: A unique identifier for the type of asset. + standard: A text string to specify which standard the type is from. + asset_class: Specifies the class the type belongs to. It's a direct relation to CogniteAssetClass. + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -1728,16 +1728,16 @@ class CogniteCADModelApply(_CogniteCADModelProperties, TypedNodeApply): Navigational aid for traversing CogniteCADModel instances Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite cad model. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - model_type (Literal['CAD', 'Image360', 'PointCloud'] | None | Omitted): CAD, PointCloud or Image360 - thumbnail (DirectRelationReference | tuple[str, str] | None | Omitted): Thumbnail of the 3D model - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. 
If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite cad model. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + model_type: CAD, PointCloud or Image360 + thumbnail: Thumbnail of the 3D model + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -1771,19 +1771,19 @@ class CogniteCADModel(_CogniteCADModelProperties, TypedNode): Navigational aid for traversing CogniteCADModel instances Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite cad model. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - model_type (Literal['CAD', 'Image360', 'PointCloud'] | None): CAD, PointCloud or Image360 - thumbnail (DirectRelationReference | None): Thumbnail of the 3D model - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite cad model. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + model_type: CAD, PointCloud or Image360 + thumbnail: Thumbnail of the 3D model + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -1846,20 +1846,20 @@ class CogniteCADNodeApply(_CogniteCADNodeProperties, TypedNodeApply): Represents nodes from the 3D model that have been contextualized Args: - space (str): The space where the node is located. 
- external_id (str): The external id of the Cognite cad node. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - object_3d (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to object3D grouping for this node - model_3d (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to Cognite3DModel - cad_node_reference (str | None | Omitted): Reference to a node within a CAD model from the 3D API - revisions (list[DirectRelationReference | tuple[str, str]] | None | Omitted): List of direct relations to instances of Cognite3DRevision which this CogniteCADNode exists in. - tree_indexes (list[int] | None | Omitted): List of tree indexes in the same order as revisions. Used by Reveal and similar applications to map from CogniteCADNode to tree index - sub_tree_sizes (list[int] | None | Omitted): List of subtree sizes in the same order as revisions. Used by Reveal and similar applications to know how many nodes exists below this node in the hierarchy - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite cad node. 
+ name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + object_3d: Direct relation to object3D grouping for this node + model_3d: Direct relation to Cognite3DModel + cad_node_reference: Reference to a node within a CAD model from the 3D API + revisions: List of direct relations to instances of Cognite3DRevision which this CogniteCADNode exists in. + tree_indexes: List of tree indexes in the same order as revisions. Used by Reveal and similar applications to map from CogniteCADNode to tree index + sub_tree_sizes: List of subtree sizes in the same order as revisions. Used by Reveal and similar applications to know how many nodes exists below this node in the hierarchy + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -1901,23 +1901,23 @@ class CogniteCADNode(_CogniteCADNodeProperties, TypedNode): Represents nodes from the 3D model that have been contextualized Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite cad node. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - object_3d (DirectRelationReference | None): Direct relation to object3D grouping for this node - model_3d (DirectRelationReference | None): Direct relation to Cognite3DModel - cad_node_reference (str | None): Reference to a node within a CAD model from the 3D API - revisions (list[DirectRelationReference] | None): List of direct relations to instances of Cognite3DRevision which this CogniteCADNode exists in. - tree_indexes (list[int] | None): List of tree indexes in the same order as revisions. Used by Reveal and similar applications to map from CogniteCADNode to tree index - sub_tree_sizes (list[int] | None): List of subtree sizes in the same order as revisions. Used by Reveal and similar applications to know how many nodes exists below this node in the hierarchy - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite cad node. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
+ name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + object_3d: Direct relation to object3D grouping for this node + model_3d: Direct relation to Cognite3DModel + cad_node_reference: Reference to a node within a CAD model from the 3D API + revisions: List of direct relations to instances of Cognite3DRevision which this CogniteCADNode exists in. + tree_indexes: List of tree indexes in the same order as revisions. Used by Reveal and similar applications to map from CogniteCADNode to tree index + sub_tree_sizes: List of subtree sizes in the same order as revisions. Used by Reveal and similar applications to know how many nodes exists below this node in the hierarchy + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -1988,15 +1988,15 @@ class CogniteCADRevisionApply(_CogniteCADRevisionProperties, TypedNodeApply): It is used when data is written to CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite cad revision. - status (Literal['Done', 'Failed', 'Processing', 'Queued'] | None | Omitted): The status field. - published (bool | None | Omitted): The published field. - revision_type (Literal['CAD', 'Image360', 'PointCloud'] | None | Omitted): The revision type field. - model_3d (DirectRelationReference | tuple[str, str] | None | Omitted): . - revision_id (int | None | Omitted): The 3D API revision identifier for this CAD model - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. 
If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite cad revision. + status: The status field. + published: The published field. + revision_type: The revision type field. + model_3d: . + revision_id: The 3D API revision identifier for this CAD model + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -2026,18 +2026,18 @@ class CogniteCADRevision(_CogniteCADRevisionProperties, TypedNode): It is used when data is read from CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite cad revision. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- status (Literal['Done', 'Failed', 'Processing', 'Queued'] | None): The status field. - published (bool | None): The published field. - revision_type (Literal['CAD', 'Image360', 'PointCloud'] | None): The revision type field. - model_3d (DirectRelationReference | None): . - revision_id (int | None): The 3D API revision identifier for this CAD model - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite cad revision. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + status: The status field. + published: The published field. + revision_type: The revision type field. + model_3d: . + revision_id: The 3D API revision identifier for this CAD model + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -2091,16 +2091,16 @@ class CogniteCubeMapApply(_CogniteCubeMapProperties, TypedNodeApply): The cube map holds references to 6 images in used to visually represent the surrounding environment Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite cube map. 
- front (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the front projection of the cube map - back (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the back projection of the cube map - left (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the left projection of the cube map - right (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the right projection of the cube map - top (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the top projection of the cube map - bottom (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a file holding the bottom projection of the cube map - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite cube map. 
+ front: Direct relation to a file holding the front projection of the cube map + back: Direct relation to a file holding the back projection of the cube map + left: Direct relation to a file holding the left projection of the cube map + right: Direct relation to a file holding the right projection of the cube map + top: Direct relation to a file holding the top projection of the cube map + bottom: Direct relation to a file holding the bottom projection of the cube map + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -2134,19 +2134,19 @@ class CogniteCubeMap(_CogniteCubeMapProperties, TypedNode): The cube map holds references to 6 images in used to visually represent the surrounding environment Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite cube map. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- front (DirectRelationReference | None): Direct relation to a file holding the front projection of the cube map - back (DirectRelationReference | None): Direct relation to a file holding the back projection of the cube map - left (DirectRelationReference | None): Direct relation to a file holding the left projection of the cube map - right (DirectRelationReference | None): Direct relation to a file holding the right projection of the cube map - top (DirectRelationReference | None): Direct relation to a file holding the top projection of the cube map - bottom (DirectRelationReference | None): Direct relation to a file holding the bottom projection of the cube map - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite cube map. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + front: Direct relation to a file holding the front projection of the cube map + back: Direct relation to a file holding the back projection of the cube map + left: Direct relation to a file holding the left projection of the cube map + right: Direct relation to a file holding the right projection of the cube map + top: Direct relation to a file holding the top projection of the cube map + bottom: Direct relation to a file holding the bottom projection of the cube map + type: Direct relation pointing to the type node. 
+ deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -2203,14 +2203,14 @@ class CogniteDescribableNodeApply(_CogniteDescribableProperties, TypedNodeApply) The describable core concept is used as a standard way of holding the bare minimum of information about the instance Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite describable node. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite describable node. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. 
If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -2240,17 +2240,17 @@ class CogniteDescribableNode(_CogniteDescribableProperties, TypedNode): The describable core concept is used as a standard way of holding the bare minimum of information about the instance Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite describable node. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite describable node. + version: DMS version. 
+ last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -2310,26 +2310,26 @@ class CogniteEquipmentApply(_CogniteEquipmentProperties, TypedNodeApply): Equipment represents physical supplies or devices. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite equipment. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
- source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - asset (DirectRelationReference | tuple[str, str] | None | Omitted): The asset the equipment is related to. - serial_number (str | None | Omitted): The serial number of the equipment. - manufacturer (str | None | Omitted): The manufacturer of the equipment. - equipment_type (DirectRelationReference | tuple[str, str] | None | Omitted): Specifies the type of the equipment. It's a direct relation to CogniteEquipmentType. - files (list[DirectRelationReference | tuple[str, str]] | None | Omitted): A list of files the equipment relates to. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite equipment. 
+ name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + asset: The asset the equipment is related to. + serial_number: The serial number of the equipment. + manufacturer: The manufacturer of the equipment. + equipment_type: Specifies the type of the equipment. It's a direct relation to CogniteEquipmentType. + files: A list of files the equipment relates to. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -2383,29 +2383,29 @@ class CogniteEquipment(_CogniteEquipmentProperties, TypedNode): Equipment represents physical supplies or devices. Args: - space (str): The space where the node is located. 
- external_id (str): The external id of the Cognite equipment. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - asset (DirectRelationReference | None): The asset the equipment is related to. - serial_number (str | None): The serial number of the equipment. - manufacturer (str | None): The manufacturer of the equipment. - equipment_type (DirectRelationReference | None): Specifies the type of the equipment. It's a direct relation to CogniteEquipmentType. - files (list[DirectRelationReference] | None): A list of files the equipment relates to. 
- type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite equipment. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + asset: The asset the equipment is related to. + serial_number: The serial number of the equipment. + manufacturer: The manufacturer of the equipment. + equipment_type: Specifies the type of the equipment. It's a direct relation to CogniteEquipmentType. 
+ files: A list of files the equipment relates to. + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -2495,18 +2495,18 @@ class CogniteEquipmentTypeApply(_CogniteEquipmentTypeProperties, TypedNodeApply) Represents the type of equipment. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite equipment type. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - code (str | None | Omitted): A unique identifier for the type of equipment. - equipment_class (str | None | Omitted): Represents the class of equipment. - standard (str | None | Omitted): An identifier for the standard this equipment type is sourced from, for example, ISO14224. - standard_reference (str | None | Omitted): A reference to the source of the equipment standard. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. 
+ external_id: The external id of the Cognite equipment type. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + code: A unique identifier for the type of equipment. + equipment_class: Represents the class of equipment. + standard: An identifier for the standard this equipment type is sourced from, for example, ISO14224. + standard_reference: A reference to the source of the equipment standard. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -2544,21 +2544,21 @@ class CogniteEquipmentType(_CogniteEquipmentTypeProperties, TypedNode): Represents the type of equipment. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite equipment type. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - code (str | None): A unique identifier for the type of equipment. 
- equipment_class (str | None): Represents the class of equipment. - standard (str | None): An identifier for the standard this equipment type is sourced from, for example, ISO14224. - standard_reference (str | None): A reference to the source of the equipment standard. - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite equipment type. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + code: A unique identifier for the type of equipment. + equipment_class: Represents the class of equipment. + standard: An identifier for the standard this equipment type is sourced from, for example, ISO14224. + standard_reference: A reference to the source of the equipment standard. + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -2629,25 +2629,25 @@ class CogniteFileApply(_CogniteFileProperties, TypedNodeApply): Represents files. 
Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite file. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - assets (list[DirectRelationReference | tuple[str, str]] | None | Omitted): A list of assets this file is related to. - mime_type (str | None | Omitted): The MIME type of the file. - directory (str | None | Omitted): Contains the path elements from the source (if the source system has a file system hierarchy or similar.) - category (DirectRelationReference | tuple[str, str] | None | Omitted): Specifies the detected category the file belongs to. It's a direct relation to an instance of CogniteFileCategory. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. 
If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite file. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + assets: A list of assets this file is related to. + mime_type: The MIME type of the file. + directory: Contains the path elements from the source (if the source system has a file system hierarchy or similar.) + category: Specifies the detected category the file belongs to. It's a direct relation to an instance of CogniteFileCategory. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. 
If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -2699,30 +2699,30 @@ class CogniteFile(_CogniteFileProperties, TypedNode): Represents files. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite file. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. 
This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - assets (list[DirectRelationReference] | None): A list of assets this file is related to. - mime_type (str | None): The MIME type of the file. - directory (str | None): Contains the path elements from the source (if the source system has a file system hierarchy or similar.) - is_uploaded (bool | None): Specifies if the file content has been uploaded to Cognite Data Fusion or not. - uploaded_time (datetime | None): The time the file upload completed. - category (DirectRelationReference | None): Specifies the detected category the file belongs to. It's a direct relation to an instance of CogniteFileCategory. - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite file. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. 
For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + assets: A list of assets this file is related to. + mime_type: The MIME type of the file. + directory: Contains the path elements from the source (if the source system has a file system hierarchy or similar.) + is_uploaded: Specifies if the file content has been uploaded to Cognite Data Fusion or not. + uploaded_time: The time the file upload completed. + category: Specifies the detected category the file belongs to. It's a direct relation to an instance of CogniteFileCategory. + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ is_uploaded = PropertyOptions("isUploaded") @@ -2815,17 +2815,17 @@ class CogniteFileCategoryApply(_CogniteFileCategoryProperties, TypedNodeApply): Represents the categories of files as determined by contextualization or categorization. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite file category. - code (str): An identifier for the category, for example, 'AA' for Accounting (from Norsok.) 
- name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - standard (str | None | Omitted): The name of the standard the category originates from, for example, 'Norsok'. - standard_reference (str | None | Omitted): A reference to the source of the category standard. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite file category. + code: An identifier for the category, for example, 'AA' for Accounting (from Norsok). + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + standard: The name of the standard the category originates from, for example, 'Norsok'. + standard_reference: A reference to the source of the category standard. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). 
If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -2861,20 +2861,20 @@ class CogniteFileCategory(_CogniteFileCategoryProperties, TypedNode): Represents the categories of files as determined by contextualization or categorization. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite file category. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - code (str): An identifier for the category, for example, 'AA' for Accounting (from Norsok.) - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - standard (str | None): The name of the standard the category originates from, for example, 'Norsok'. - standard_reference (str | None): A reference to the source of the category standard. - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite file category. + version: DMS version. 
+ last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + code: An identifier for the category, for example, 'AA' for Accounting (from Norsok). + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + standard: The name of the standard the category originates from, for example, 'Norsok'. + standard_reference: A reference to the source of the category standard. + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -2936,16 +2936,16 @@ class CognitePointCloudModelApply(_CognitePointCloudModelProperties, TypedNodeAp Navigational aid for traversing CognitePointCloudModel instances Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite point cloud model. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - model_type (Literal['CAD', 'Image360', 'PointCloud'] | None | Omitted): CAD, PointCloud or Image360 - thumbnail (DirectRelationReference | tuple[str, str] | None | Omitted): Thumbnail of the 3D model - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. 
If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite point cloud model. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + model_type: CAD, PointCloud or Image360 + thumbnail: Thumbnail of the 3D model + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -2979,19 +2979,19 @@ class CognitePointCloudModel(_CognitePointCloudModelProperties, TypedNode): Navigational aid for traversing CognitePointCloudModel instances Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite point cloud model. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - model_type (Literal['CAD', 'Image360', 'PointCloud'] | None): CAD, PointCloud or Image360 - thumbnail (DirectRelationReference | None): Thumbnail of the 3D model - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite point cloud model. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + model_type: CAD, PointCloud or Image360 + thumbnail: Thumbnail of the 3D model + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. 
Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -3052,15 +3052,15 @@ class CognitePointCloudRevisionApply(_CognitePointCloudRevisionProperties, Typed Navigational aid for traversing CognitePointCloudRevision instances Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite point cloud revision. - status (Literal['Done', 'Failed', 'Processing', 'Queued'] | None | Omitted): The status field. - published (bool | None | Omitted): The published field. - revision_type (Literal['CAD', 'Image360', 'PointCloud'] | None | Omitted): The revision type field. - model_3d (DirectRelationReference | tuple[str, str] | None | Omitted): . - revision_id (int | None | Omitted): The 3D API revision identifier for this PointCloud model - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite point cloud revision. + status: The status field. + published: The published field. + revision_type: The revision type field. + model_3d: Direct relation to Cognite3DModel instance + revision_id: The 3D API revision identifier for this PointCloud model + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value.
If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -3092,18 +3092,18 @@ class CognitePointCloudRevision(_CognitePointCloudRevisionProperties, TypedNode) Navigational aid for traversing CognitePointCloudRevision instances Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite point cloud revision. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - status (Literal['Done', 'Failed', 'Processing', 'Queued'] | None): The status field. - published (bool | None): The published field. - revision_type (Literal['CAD', 'Image360', 'PointCloud'] | None): The revision type field. - model_3d (DirectRelationReference | None): . - revision_id (int | None): The 3D API revision identifier for this PointCloud model - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite point cloud revision. + version: DMS version. 
+ last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + status: The status field. + published: The published field. + revision_type: The revision type field. + model_3d: Direct relation to Cognite3DModel instance + revision_id: The 3D API revision identifier for this PointCloud model + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -3163,21 +3163,21 @@ class CognitePointCloudVolumeApply(_CognitePointCloudVolumeProperties, TypedNode PointCloud volume definition Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite point cloud volume.
- name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - object_3d (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to object3D grouping for this node - model_3d (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to Cognite3DModel instance - volume_references (list[str] | None | Omitted): Unique volume metric hashes used to access the 3D specialized data storage - revisions (list[DirectRelationReference | tuple[str, str]] | None | Omitted): List of direct relations to revision information - volume_type (Literal['Box', 'Cylinder'] | None | Omitted): Type of volume (Cylinder or Box) - volume (list[float] | None | Omitted): Relevant coordinates for the volume type, 9 floats in total, that defines the volume - format_version (str | None | Omitted): Specifies the version the 'volume' field is following. Volume definition is today 9 floats (property volume) - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite point cloud volume. 
+ name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + object_3d: Direct relation to object3D grouping for this node + model_3d: Direct relation to Cognite3DModel instance + volume_references: Unique volume metric hashes used to access the 3D specialized data storage + revisions: List of direct relations to revision information + volume_type: Type of volume (Cylinder or Box) + volume: Relevant coordinates for the volume type, 9 floats in total, that defines the volume + format_version: Specifies the version the 'volume' field is following. Volume definition is today 9 floats (property volume) + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -3221,24 +3221,24 @@ class CognitePointCloudVolume(_CognitePointCloudVolumeProperties, TypedNode): PointCloud volume definition Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite point cloud volume. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - object_3d (DirectRelationReference | None): Direct relation to object3D grouping for this node - model_3d (DirectRelationReference | None): Direct relation to Cognite3DModel instance - volume_references (list[str] | None): Unique volume metric hashes used to access the 3D specialized data storage - revisions (list[DirectRelationReference] | None): List of direct relations to revision information - volume_type (Literal['Box', 'Cylinder'] | None): Type of volume (Cylinder or Box) - volume (list[float] | None): Relevant coordinates for the volume type, 9 floats in total, that defines the volume - format_version (str | None): Specifies the version the 'volume' field is following. Volume definition is today 9 floats (property volume) - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite point cloud volume. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
+ name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + object_3d: Direct relation to object3D grouping for this node + model_3d: Direct relation to Cognite3DModel instance + volume_references: Unique volume metric hashes used to access the 3D specialized data storage + revisions: List of direct relations to revision information + volume_type: Type of volume (Cylinder or Box) + volume: Relevant coordinates for the volume type, 9 floats in total, that defines the volume + format_version: Specifies the version the 'volume' field is following. Volume definition is today 9 floats (property volume) + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -3315,14 +3315,14 @@ class CogniteSchedulableApply(_CogniteSchedulableProperties, TypedNodeApply): CogniteSchedulable represents the metadata about when an activity (or similar) starts and ends. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite schedulable. - start_time (datetime | None | Omitted): The actual start time of an activity (or similar that extends this) - end_time (datetime | None | Omitted): The actual end time of an activity (or similar that extends this) - scheduled_start_time (datetime | None | Omitted): The planned start time of an activity (or similar that extends this) - scheduled_end_time (datetime | None | Omitted): The planned end time of an activity (or similar that extends this) - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. 
If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite schedulable. + start_time: The actual start time of an activity (or similar that extends this) + end_time: The actual end time of an activity (or similar that extends this) + scheduled_start_time: The planned start time of an activity (or similar that extends this) + scheduled_end_time: The planned end time of an activity (or similar that extends this) + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -3352,17 +3352,17 @@ class CogniteSchedulable(_CogniteSchedulableProperties, TypedNode): CogniteSchedulable represents the metadata about when an activity (or similar) starts and ends. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite schedulable. - version (int): DMS version. 
- last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - start_time (datetime | None): The actual start time of an activity (or similar that extends this) - end_time (datetime | None): The actual end time of an activity (or similar that extends this) - scheduled_start_time (datetime | None): The planned start time of an activity (or similar that extends this) - scheduled_end_time (datetime | None): The planned end time of an activity (or similar that extends this) - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite schedulable. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + start_time: The actual start time of an activity (or similar that extends this) + end_time: The actual end time of an activity (or similar that extends this) + scheduled_start_time: The planned start time of an activity (or similar that extends this) + scheduled_end_time: The planned end time of an activity (or similar that extends this) + type: Direct relation pointing to the type node. 
+ deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -3415,16 +3415,16 @@ class CogniteSourceSystemApply(_CogniteSourceSystemProperties, TypedNodeApply): The CogniteSourceSystem core concept is used to standardize the way source system is stored. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite source system. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_system_version (str | None | Omitted): Version identifier for the source system - manufacturer (str | None | Omitted): Manufacturer of the source system - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite source system. 
+ name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_system_version: Version identifier for the source system + manufacturer: Manufacturer of the source system + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -3458,19 +3458,19 @@ class CogniteSourceSystem(_CogniteSourceSystemProperties, TypedNode): The CogniteSourceSystem core concept is used to standardize the way source system is stored. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite source system. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_system_version (str | None): Version identifier for the source system - manufacturer (str | None): Manufacturer of the source system - type (DirectRelationReference | None): Direct relation pointing to the type node. 
- deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite source system. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_system_version: Version identifier for the source system + manufacturer: Manufacturer of the source system + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -3532,17 +3532,17 @@ class CogniteSourceableNodeApply(_CogniteSourceableProperties, TypedNodeApply): It is used when data is written to CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite sourceable node. - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
- source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite sourceable node. + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. 
This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -3576,20 +3576,20 @@ class CogniteSourceableNode(_CogniteSourceableProperties, TypedNode): It is used when data is read from CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite sourceable node. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
- source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite sourceable node. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. 
This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -3659,27 +3659,27 @@ class CogniteTimeSeriesApply(_CogniteTimeSeriesProperties, TypedNodeApply): Represents a series of data points in time order." Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite time series. - is_step (bool): Specifies whether the time series is a step time series or not. - time_series_type (Literal['numeric', 'string']): Specifies the data type of the data points. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
- source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_unit (str | None | Omitted): The unit specified in the source system. - unit (DirectRelationReference | tuple[str, str] | None | Omitted): The unit of the time series. - assets (list[DirectRelationReference | tuple[str, str]] | None | Omitted): A list of assets the time series is related to. - equipment (list[DirectRelationReference | tuple[str, str]] | None | Omitted): A list of equipment the time series is related to. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite time series. + is_step: Specifies whether the time series is a step time series or not. 
+ time_series_type: Specifies the data type of the data points. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_unit: The unit specified in the source system. + unit: The unit of the time series. + assets: A list of assets the time series is related to. + equipment: A list of equipment the time series is related to. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -3735,30 +3735,30 @@ class CogniteTimeSeries(_CogniteTimeSeriesProperties, TypedNode): Represents a series of data points in time order." Args: - space (str): The space where the node is located. 
- external_id (str): The external id of the Cognite time series. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - is_step (bool): Specifies whether the time series is a step time series or not. - time_series_type (Literal['numeric', 'string']): Specifies the data type of the data points. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_unit (str | None): The unit specified in the source system. - unit (DirectRelationReference | None): The unit of the time series. - assets (list[DirectRelationReference] | None): A list of assets the time series is related to. 
- equipment (list[DirectRelationReference] | None): A list of equipment the time series is related to. - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite time series. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + is_step: Specifies whether the time series is a step time series or not. + time_series_type: Specifies the data type of the data points. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_unit: The unit specified in the source system. 
+ unit: The unit of the time series. + assets: A list of assets the time series is related to. + equipment: A list of equipment the time series is related to. + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -3850,18 +3850,18 @@ class CogniteUnitApply(_CogniteUnitProperties, TypedNodeApply): Represents a single unit of measurement Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite unit. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - symbol (str | None | Omitted): The symbol for the unit of measurement - quantity (str | None | Omitted): Specifies the physical quantity the unit measures - source (str | None | Omitted): Source of the unit definition - source_reference (str | None | Omitted): Reference to the source of the unit definition - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. 
+ space: The space where the node is located. + external_id: The external id of the Cognite unit. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + symbol: The symbol for the unit of measurement + quantity: Specifies the physical quantity the unit measures + source: Source of the unit definition + source_reference: Reference to the source of the unit definition + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -3899,21 +3899,21 @@ class CogniteUnit(_CogniteUnitProperties, TypedNode): Represents a single unit of measurement Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite unit. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - symbol (str | None): The symbol for the unit of measurement - quantity (str | None): Specifies the physical quantity the unit measures - source (str | None): Source of the unit definition - source_reference (str | None): Reference to the source of the unit definition - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite unit. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + symbol: The symbol for the unit of measurement + quantity: Specifies the physical quantity the unit measures + source: Source of the unit definition + source_reference: Reference to the source of the unit definition + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. 
Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -3978,11 +3978,11 @@ class CogniteVisualizableApply(_CogniteVisualizableProperties, TypedNodeApply): CogniteVisualizable defines the standard way to reference a related 3D resource Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite visualizable. - object_3d (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to an Object3D instance representing the 3D resource - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite visualizable. + object_3d: Direct relation to an Object3D instance representing the 3D resource + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. 
""" def __init__( @@ -4006,14 +4006,14 @@ class CogniteVisualizable(_CogniteVisualizableProperties, TypedNode): CogniteVisualizable defines the standard way to reference a related 3D resource Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite visualizable. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - object_3d (DirectRelationReference | None): Direct relation to an Object3D instance representing the 3D resource - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite visualizable. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + object_3d: Direct relation to an Object3D instance representing the 3D resource + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. 
Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -4061,27 +4061,27 @@ class Cognite360ImageAnnotationApply(_Cognite360ImageAnnotationProperties, Typed It is used when data is written to CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 360 image annotation. - type (DirectRelationReference | tuple[str, str]): The type of edge. - start_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. 
This identifier is not guaranteed to match the user identifiers in CDF - confidence (float | None | Omitted): The confidence that the annotation is a good match - status (Literal['Approved', 'Rejected', 'Suggested'] | None | Omitted): The status of the annotation - polygon (list[float] | None | Omitted): List of floats representing the polygon. Format depends on formatVersion - format_version (str | None | Omitted): Specifies the storage representation for the polygon - existing_version (int | None): Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + space: The space where the node is located. + external_id: The external id of the Cognite 360 image annotation. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
+ source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + confidence: The confidence that the annotation is a good match + status: The status of the annotation + polygon: List of floats representing the polygon. Format depends on formatVersion + format_version: Specifies the storage representation for the polygon + existing_version: Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. """ def __init__( @@ -4133,30 +4133,30 @@ class Cognite360ImageAnnotation(_Cognite360ImageAnnotationProperties, TypedEdge) It is used when data is read from CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 360 image annotation. - type (DirectRelationReference): The type of edge. - start_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - version (int): DMS version. 
- last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - confidence (float | None): The confidence that the annotation is a good match - status (Literal['Approved', 'Rejected', 'Suggested'] | None): The status of the annotation - polygon (list[float] | None): List of floats representing the polygon. Format depends on formatVersion - format_version (str | None): Specifies the storage representation for the polygon - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. 
Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite 360 image annotation. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + confidence: The confidence that the annotation is a good match + status: The status of the annotation + polygon: List of floats representing the polygon. 
Format depends on formatVersion + format_version: Specifies the storage representation for the polygon + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -4240,21 +4240,21 @@ class Cognite3DTransformationEdgeApply(_Cognite3DTransformationProperties, Typed The Cognite3DTransformation object defines a comprehensive 3D transformation, enabling precise adjustments to an object's position, orientation, and size in the 3D coordinate system. It allows for the translation of objects along the three spatial axes, rotation around these axes using Euler angles, and scaling along each axis to modify the object's dimensions. The object's transformation is defined in "CDF space", a coordinate system where the positive Z axis is the up direction Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 3D transformation edge. - type (DirectRelationReference | tuple[str, str]): The type of edge. - start_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. 
- translation_x (float | None | Omitted): The displacement of the object along the X-axis in the 3D coordinate system - translation_y (float | None | Omitted): The displacement of the object along the Y-axis in the 3D coordinate system - translation_z (float | None | Omitted): The displacement of the object along the Z-axis in the 3D coordinate system - euler_rotation_x (float | None | Omitted): The rotation of the object around the X-axis in radians - euler_rotation_y (float | None | Omitted): The rotation of the object around the Y-axis in radians - euler_rotation_z (float | None | Omitted): The rotation of the object around the Z-axis in radians - scale_x (float | None | Omitted): The scaling factor applied to the object along the X-axis - scale_y (float | None | Omitted): The scaling factor applied to the object along the Y-axis - scale_z (float | None | Omitted): The scaling factor applied to the object along the Z-axis - existing_version (int | None): Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + space: The space where the node is located. + external_id: The external id of the Cognite 3D transformation edge. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. 
+ translation_x: The displacement of the object along the X-axis in the 3D coordinate system + translation_y: The displacement of the object along the Y-axis in the 3D coordinate system + translation_z: The displacement of the object along the Z-axis in the 3D coordinate system + euler_rotation_x: The rotation of the object around the X-axis in radians + euler_rotation_y: The rotation of the object around the Y-axis in radians + euler_rotation_z: The rotation of the object around the Z-axis in radians + scale_x: The scaling factor applied to the object along the X-axis + scale_y: The scaling factor applied to the object along the Y-axis + scale_z: The scaling factor applied to the object along the Z-axis + existing_version: Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. """ def __init__( @@ -4296,24 +4296,24 @@ class Cognite3DTransformationEdge(_Cognite3DTransformationProperties, TypedEdge) The Cognite3DTransformation object defines a comprehensive 3D transformation, enabling precise adjustments to an object's position, orientation, and size in the 3D coordinate system. It allows for the translation of objects along the three spatial axes, rotation around these axes using Euler angles, and scaling along each axis to modify the object's dimensions. The object's transformation is defined in "CDF space", a coordinate system where the positive Z axis is the up direction Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite 3D transformation edge. 
- type (DirectRelationReference): The type of edge. - start_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - translation_x (float | None): The displacement of the object along the X-axis in the 3D coordinate system - translation_y (float | None): The displacement of the object along the Y-axis in the 3D coordinate system - translation_z (float | None): The displacement of the object along the Z-axis in the 3D coordinate system - euler_rotation_x (float | None): The rotation of the object around the X-axis in radians - euler_rotation_y (float | None): The rotation of the object around the Y-axis in radians - euler_rotation_z (float | None): The rotation of the object around the Z-axis in radians - scale_x (float | None): The scaling factor applied to the object along the X-axis - scale_y (float | None): The scaling factor applied to the object along the Y-axis - scale_z (float | None): The scaling factor applied to the object along the Z-axis - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite 3D transformation edge. + type: The type of edge. + start_node: Reference to the direct relation. 
The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + translation_x: The displacement of the object along the X-axis in the 3D coordinate system + translation_y: The displacement of the object along the Y-axis in the 3D coordinate system + translation_z: The displacement of the object along the Z-axis in the 3D coordinate system + euler_rotation_x: The rotation of the object around the X-axis in radians + euler_rotation_y: The rotation of the object around the Y-axis in radians + euler_rotation_z: The rotation of the object around the Z-axis in radians + scale_x: The scaling factor applied to the object along the X-axis + scale_y: The scaling factor applied to the object along the Y-axis + scale_z: The scaling factor applied to the object along the Z-axis + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -4392,25 +4392,25 @@ class CogniteAnnotationApply(_CogniteAnnotationProperties, TypedEdgeApply): Annotation represents contextualization results or links Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite annotation. - type (DirectRelationReference | tuple[str, str]): The type of edge. - start_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. 
- end_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - confidence (float | None | Omitted): The confidence that the annotation is a good match - status (Literal['Approved', 'Rejected', 'Suggested'] | None | Omitted): The status of the annotation - existing_version (int | None): Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. 
If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + space: The space where the node is located. + external_id: The external id of the Cognite annotation. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + confidence: The confidence that the annotation is a good match + status: The status of the annotation + existing_version: Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. 
""" def __init__( @@ -4460,28 +4460,28 @@ class CogniteAnnotation(_CogniteAnnotationProperties, TypedEdge): Annotation represents contextualization results or links Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite annotation. - type (DirectRelationReference): The type of edge. - start_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. 
This identifier is not guaranteed to match the user identifiers in CDF - confidence (float | None): The confidence that the annotation is a good match - status (Literal['Approved', 'Rejected', 'Suggested'] | None): The status of the annotation - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite annotation. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. 
This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + confidence: The confidence that the annotation is a good match + status: The status of the annotation + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -4559,16 +4559,16 @@ class CogniteDescribableEdgeApply(_CogniteDescribableProperties, TypedEdgeApply) The describable core concept is used as a standard way of holding the bare minimum of information about the instance Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite describable edge. - type (DirectRelationReference | tuple[str, str]): The type of edge. - start_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - existing_version (int | None): Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). 
If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + space: The space where the node is located. + external_id: The external id of the Cognite describable edge. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + existing_version: Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. """ def __init__( @@ -4600,19 +4600,19 @@ class CogniteDescribableEdge(_CogniteDescribableProperties, TypedEdge): The describable core concept is used as a standard way of holding the bare minimum of information about the instance Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite describable edge. - type (DirectRelationReference): The type of edge. - start_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - version (int): DMS version. 
- last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite describable edge. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. 
Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -4688,37 +4688,37 @@ class CogniteDiagramAnnotationApply(_CogniteDiagramAnnotationProperties, TypedEd Annotation for diagrams Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite diagram annotation. - type (DirectRelationReference | tuple[str, str]): The type of edge. - start_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. 
This identifier is not guaranteed to match the user identifiers in CDF - confidence (float | None | Omitted): The confidence that the annotation is a good match - status (Literal['Approved', 'Rejected', 'Suggested'] | None | Omitted): The status of the annotation - start_node_page_number (int | None | Omitted): The number of the page on which this annotation is located in `startNode` File. The first page has number 1 - end_node_page_number (int | None | Omitted): The number of the page on which this annotation is located in the endNode File if an endNode is present. The first page has number 1 - start_node_x_min (float | None | Omitted): Value between [0,1]. Minimum abscissa of the bounding box (left edge). Must be strictly less than startNodeXMax - start_node_x_max (float | None | Omitted): Value between [0,1]. Maximum abscissa of the bounding box (right edge). Must be strictly more than startNodeXMin - start_node_y_min (float | None | Omitted): Value between [0,1]. Minimum ordinate of the bounding box (bottom edge). Must be strictly less than startNodeYMax - start_node_y_max (float | None | Omitted): Value between [0,1]. Maximum ordinate of the bounding box (top edge). Must be strictly more than startNodeYMin - start_node_text (str | None | Omitted): The text extracted from within the bounding box on the startNode - end_node_x_min (float | None | Omitted): Value between [0,1]. Minimum abscissa of the bounding box (left edge). Must be strictly less than endNodeXMax. Only applicable if an endNode is defined - end_node_x_max (float | None | Omitted): Value between [0,1]. Maximum abscissa of the bounding box (right edge). Must be strictly more than endNodeXMin. Only applicable if an endNode is defined - end_node_y_min (float | None | Omitted): Value between [0,1]. Minimum ordinate of the bounding box (bottom edge). Must be strictly less than endNodeYMax. Only applicable if an endNode is defined - end_node_y_max (float | None | Omitted): Value between [0,1]. 
Maximum ordinate of the bounding box (top edge). Must be strictly more than endNodeYMin. Only applicable if an endNode is defined - end_node_text (str | None | Omitted): The text extracted from within the bounding box on the endNode. Only applicable if an endNode is defined - existing_version (int | None): Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + space: The space where the node is located. + external_id: The external id of the Cognite diagram annotation. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. 
This identifier is not guaranteed to match the user identifiers in CDF + confidence: The confidence that the annotation is a good match + status: The status of the annotation + start_node_page_number: The number of the page on which this annotation is located in `startNode` File. The first page has number 1 + end_node_page_number: The number of the page on which this annotation is located in the endNode File if an endNode is present. The first page has number 1 + start_node_x_min: Value between [0,1]. Minimum abscissa of the bounding box (left edge). Must be strictly less than startNodeXMax + start_node_x_max: Value between [0,1]. Maximum abscissa of the bounding box (right edge). Must be strictly more than startNodeXMin + start_node_y_min: Value between [0,1]. Minimum ordinate of the bounding box (bottom edge). Must be strictly less than startNodeYMax + start_node_y_max: Value between [0,1]. Maximum ordinate of the bounding box (top edge). Must be strictly more than startNodeYMin + start_node_text: The text extracted from within the bounding box on the startNode + end_node_x_min: Value between [0,1]. Minimum abscissa of the bounding box (left edge). Must be strictly less than endNodeXMax. Only applicable if an endNode is defined + end_node_x_max: Value between [0,1]. Maximum abscissa of the bounding box (right edge). Must be strictly more than endNodeXMin. Only applicable if an endNode is defined + end_node_y_min: Value between [0,1]. Minimum ordinate of the bounding box (bottom edge). Must be strictly less than endNodeYMax. Only applicable if an endNode is defined + end_node_y_max: Value between [0,1]. Maximum ordinate of the bounding box (top edge). Must be strictly more than endNodeYMin. Only applicable if an endNode is defined + end_node_text: The text extracted from within the bounding box on the endNode. Only applicable if an endNode is defined + existing_version: Fail the ingestion request if the edge's version is greater than or equal to this value. 
If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. """ def __init__( @@ -4792,40 +4792,40 @@ class CogniteDiagramAnnotation(_CogniteDiagramAnnotationProperties, TypedEdge): Annotation for diagrams Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite diagram annotation. - type (DirectRelationReference): The type of edge. - start_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
- source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - confidence (float | None): The confidence that the annotation is a good match - status (Literal['Approved', 'Rejected', 'Suggested'] | None): The status of the annotation - start_node_page_number (int | None): The number of the page on which this annotation is located in `startNode` File. The first page has number 1 - end_node_page_number (int | None): The number of the page on which this annotation is located in the endNode File if an endNode is present. The first page has number 1 - start_node_x_min (float | None): Value between [0,1]. Minimum abscissa of the bounding box (left edge). Must be strictly less than startNodeXMax - start_node_x_max (float | None): Value between [0,1]. Maximum abscissa of the bounding box (right edge). Must be strictly more than startNodeXMin - start_node_y_min (float | None): Value between [0,1]. Minimum ordinate of the bounding box (bottom edge). Must be strictly less than startNodeYMax - start_node_y_max (float | None): Value between [0,1]. Maximum ordinate of the bounding box (top edge). Must be strictly more than startNodeYMin - start_node_text (str | None): The text extracted from within the bounding box on the startNode - end_node_x_min (float | None): Value between [0,1]. Minimum abscissa of the bounding box (left edge). Must be strictly less than endNodeXMax. 
Only applicable if an endNode is defined - end_node_x_max (float | None): Value between [0,1]. Maximum abscissa of the bounding box (right edge). Must be strictly more than endNodeXMin. Only applicable if an endNode is defined - end_node_y_min (float | None): Value between [0,1]. Minimum ordinate of the bounding box (bottom edge). Must be strictly less than endNodeYMax. Only applicable if an endNode is defined - end_node_y_max (float | None): Value between [0,1]. Maximum ordinate of the bounding box (top edge). Must be strictly more than endNodeYMin. Only applicable if an endNode is defined - end_node_text (str | None): The text extracted from within the bounding box on the endNode. Only applicable if an endNode is defined - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite diagram annotation. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. 
For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + confidence: The confidence that the annotation is a good match + status: The status of the annotation + start_node_page_number: The number of the page on which this annotation is located in `startNode` File. The first page has number 1 + end_node_page_number: The number of the page on which this annotation is located in the endNode File if an endNode is present. The first page has number 1 + start_node_x_min: Value between [0,1]. Minimum abscissa of the bounding box (left edge). Must be strictly less than startNodeXMax + start_node_x_max: Value between [0,1]. Maximum abscissa of the bounding box (right edge). Must be strictly more than startNodeXMin + start_node_y_min: Value between [0,1]. Minimum ordinate of the bounding box (bottom edge). Must be strictly less than startNodeYMax + start_node_y_max: Value between [0,1]. Maximum ordinate of the bounding box (top edge). Must be strictly more than startNodeYMin + start_node_text: The text extracted from within the bounding box on the startNode + end_node_x_min: Value between [0,1]. Minimum abscissa of the bounding box (left edge). Must be strictly less than endNodeXMax. Only applicable if an endNode is defined + end_node_x_max: Value between [0,1]. Maximum abscissa of the bounding box (right edge). Must be strictly more than endNodeXMin. 
Only applicable if an endNode is defined + end_node_y_min: Value between [0,1]. Minimum ordinate of the bounding box (bottom edge). Must be strictly less than endNodeYMax. Only applicable if an endNode is defined + end_node_y_max: Value between [0,1]. Maximum ordinate of the bounding box (top edge). Must be strictly more than endNodeYMin. Only applicable if an endNode is defined + end_node_text: The text extracted from within the bounding box on the endNode. Only applicable if an endNode is defined + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -4937,19 +4937,19 @@ class CogniteSourceableEdgeApply(_CogniteSourceableProperties, TypedEdgeApply): It is used when data is written to CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite sourceable edge. - type (DirectRelationReference | tuple[str, str]): The type of edge. - start_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
- source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - existing_version (int | None): Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + space: The space where the node is located. + external_id: The external id of the Cognite sourceable edge. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
+ source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + existing_version: Fail the ingestion request if the edge's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. """ def __init__( @@ -4985,22 +4985,22 @@ class CogniteSourceableEdge(_CogniteSourceableProperties, TypedEdge): It is used when data is read from CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite sourceable edge. - type (DirectRelationReference): The type of edge. - start_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference): Reference to the direct relation. The reference consists of a space and an external-id. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite sourceable edge. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
+ source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( diff --git a/cognite/client/data_classes/data_modeling/containers.py b/cognite/client/data_classes/data_modeling/containers.py index 8de4f7cac0..9d096d1256 100644 --- a/cognite/client/data_classes/data_modeling/containers.py +++ b/cognite/client/data_classes/data_modeling/containers.py @@ -36,13 +36,13 @@ class ContainerCore(DataModelingSchemaResource["ContainerApply"], ABC): """Represent the physical storage of data. This is the base class for the read and write version. Args: - space (str): The workspace for the container, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the container. - properties (Mapping[str, ContainerPropertyCore]): We index the property by a local unique identifier. - description (str | None): Textual description of the container - name (str | None): Human readable name for the container. - constraints (Mapping[str, ConstraintCore] | None): Set of constraints to apply to the container - indexes (Mapping[str, IndexCore] | None): Set of indexes to apply to the container. + space: The workspace for the container, a unique identifier for the space. 
+ external_id: Combined with the space is the unique identifier of the container. + properties: We index the property by a local unique identifier. + description: Textual description of the container + name: Human readable name for the container. + constraints: Set of constraints to apply to the container + indexes: Set of indexes to apply to the container. """ space: str @@ -100,14 +100,14 @@ class ContainerApply(ContainerCore): """Represent the physical storage of data. This is the write format of the container Args: - space (str): The workspace for the container, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the container. - properties (Mapping[str, ContainerPropertyApply]): We index the property by a local unique identifier. - description (str | None): Textual description of the container - name (str | None): Human readable name for the container. - used_for (Literal['node', 'edge', 'all'] | None): Should this operation apply to nodes, edges or both. - constraints (Mapping[str, ConstraintApply] | None): Set of constraints to apply to the container - indexes (Mapping[str, IndexApply] | None): Set of indexes to apply to the container. + space: The workspace for the container, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the container. + properties: We index the property by a local unique identifier. + description: Textual description of the container + name: Human readable name for the container. + used_for: Should this operation apply to nodes, edges or both. + constraints: Set of constraints to apply to the container + indexes: Set of indexes to apply to the container. """ properties: Mapping[str, ContainerPropertyApply] @@ -139,17 +139,17 @@ class Container(ContainerCore): """Represent the physical storage of data. This is the read format of the container Args: - space (str): The workspace for the container, a unique identifier for the space. 
- external_id (str): Combined with the space is the unique identifier of the container. - properties (Mapping[str, ContainerProperty]): We index the property by a local unique identifier. - is_global (bool): Whether this is a global container, i.e., one of the out-of-the-box models. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - description (str | None): Textual description of the container - name (str | None): Human readable name for the container. - used_for (Literal['node', 'edge', 'all']): Should this operation apply to nodes, edges or both. - constraints (Mapping[str, Constraint] | None): Set of constraints to apply to the container - indexes (Mapping[str, Index] | None): Set of indexes to apply to the container. + space: The workspace for the container, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the container. + properties: We index the property by a local unique identifier. + is_global: Whether this is a global container, i.e., one of the out-of-the-box models. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + description: Textual description of the container + name: Human readable name for the container. + used_for: Should this operation apply to nodes, edges or both. + constraints: Set of constraints to apply to the container + indexes: Set of indexes to apply to the container. """ properties: Mapping[str, ContainerProperty] @@ -197,7 +197,7 @@ def as_ids(self) -> list[ContainerId]: """Convert to a container id list. 
Returns: - list[ContainerId]: The container id list. + The container id list. """ return [v.as_id() for v in self] @@ -209,7 +209,7 @@ def as_apply(self) -> ContainerApplyList: """Convert to a ContainerApply list. Returns: - ContainerApplyList: The container apply list. + The container apply list. """ return ContainerApplyList([v.as_apply() for v in self]) @@ -217,7 +217,7 @@ def as_ids(self) -> list[ContainerId]: """Convert to a container id list. Returns: - list[ContainerId]: The container id list. + The container id list. """ return [v.as_id() for v in self] diff --git a/cognite/client/data_classes/data_modeling/data_models.py b/cognite/client/data_classes/data_modeling/data_models.py index 7f0ce1c311..4f7ed50f27 100644 --- a/cognite/client/data_classes/data_modeling/data_models.py +++ b/cognite/client/data_classes/data_modeling/data_models.py @@ -22,11 +22,11 @@ class DataModelCore(DataModelingSchemaResource["DataModelApply"], ABC): """A group of views. Args: - space (str): The workspace for the data model, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the data model. - version (str): DMS version. - description (str | None): Textual description of the data model - name (str | None): Human readable name for the data model. + space: The workspace for the data model, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the data model. + version: DMS version. + description: Textual description of the data model + name: Human readable name for the data model. """ def __init__( @@ -48,12 +48,12 @@ class DataModelApply(DataModelCore): """A group of views. This is the write version of a Data Model. Args: - space (str): The workspace for the data model, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the data model. - version (str): DMS version. 
- description (str | None): Textual description of the data model - name (str | None): Human readable name for the data model. - views (Sequence[ViewId | ViewApply] | None): List of views included in this data model. + space: The workspace for the data model, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the data model. + version: DMS version. + description: Textual description of the data model + name: Human readable name for the data model. + views: List of views included in this data model. """ def __init__( @@ -107,15 +107,15 @@ class DataModel(DataModelCore, Generic[T_View]): """A group of views. This is the read version of a Data Model Args: - space (str): The workspace for the data model, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the data model. - version (str): DMS version. - is_global (bool): Whether this is a global data model. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - description (str | None): Textual description of the data model - name (str | None): Human readable name for the data model. - views (list[T_View] | None): List of views included in this data model. + space: The workspace for the data model, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the data model. + version: DMS version. + is_global: Whether this is a global data model. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
+ description: Textual description of the data model + name: Human readable name for the data model. + views: List of views included in this data model. """ def __init__( @@ -196,7 +196,7 @@ def as_ids(self) -> list[DataModelId]: Convert the list of data models to a list of data model ids. Returns: - list[DataModelId]: The list of data model ids. + The list of data model ids. """ return [d.as_id() for d in self] @@ -210,7 +210,7 @@ def as_apply(self) -> DataModelApplyList: Convert the list of data models to a list of data model applies. Returns: - DataModelApplyList: The list of data model applies. + The list of data model applies. """ return DataModelApplyList([d.as_apply() for d in self]) @@ -220,10 +220,10 @@ def latest_version(self, key: Literal["created_time", "last_updated_time"] = "cr created_time or last_updated_time field. Args: - key (Literal['created_time', 'last_updated_time']): The field to use for determining the latest version. + key: The field to use for determining the latest version. Returns: - DataModel[T_View]: The data model with the latest version. + The data model with the latest version. """ if not self: raise ValueError("No data models in list") @@ -236,7 +236,7 @@ def as_ids(self) -> list[DataModelId]: Convert the list of data models to a list of data model ids. Returns: - list[DataModelId]: The list of data model ids. + The list of data model ids. """ return [d.as_id() for d in self] @@ -248,10 +248,10 @@ class DataModelFilter(CogniteFilter): """Represent the filer arguments for the list endpoint. Args: - space (str | None): The space to query - inline_views (bool): Whether to expand the referenced views inline in the returned result. - all_versions (bool): Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. - include_global (bool): Whether to include global views. 
+ space: The space to query + inline_views: Whether to expand the referenced views inline in the returned result. + all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global: Whether to include global views. """ def __init__( diff --git a/cognite/client/data_classes/data_modeling/debug.py b/cognite/client/data_classes/data_modeling/debug.py index 6f1cb18eb8..d1676dd052 100644 --- a/cognite/client/data_classes/data_modeling/debug.py +++ b/cognite/client/data_classes/data_modeling/debug.py @@ -19,12 +19,12 @@ class DebugInfo(CogniteResource): Contains the requested debug information. Args: - notices (DebugNoticeList | None): A list of notices that provide insights into the query's execution. + notices: A list of notices that provide insights into the query's execution. These can highlight potential performance issues, offer optimization suggestions, or explain aspects of the query processing. Each notice falls into a category, such as indexing, sorting, filtering, or cursoring, to help identify areas for improvement. - translated_query (TranslatedQuery | None): The internal representation of the query. - plan (ExecutionPlan | None): The execution plan for the query. + translated_query: The internal representation of the query. + plan: The execution plan for the query. """ notices: DebugNoticeList | None = None @@ -57,8 +57,8 @@ class TranslatedQuery(CogniteResource): Internal representation of query. Depends on postgres-controlled output, hence the generic dict types. Args: - query (object): Parameterized query. - parameters (object): Parameter values for query. + query: Parameterized query. + parameters: Parameter values for query. """ query: dict[str, Any] @@ -78,9 +78,9 @@ class ExecutionPlan(CogniteResource): Execution plan for the query. Args: - full_plan (dict[str, Any]): The full execution plan. - profiled (bool): The execution plan has been profiled. 
- by_identifier (dict[str, Any]): The execution plan grouped by query identifiers. + full_plan: The full execution plan. + profiled: The execution plan has been profiled. + by_identifier: The execution plan grouped by query identifiers. """ full_plan: dict[str, Any] @@ -109,11 +109,11 @@ class DebugParameters: Debug parameters for debugging and analyzing queries. Args: - emit_results (bool): Include the query result in the response. Using emit_results=False is required for advanced query analysis features. - timeout (int | None): Query timeout in milliseconds. Can be used to override the default timeout when analysing queries. Requires emit_results=False. - include_translated_query (bool): Include the internal representation of the query. - include_plan (bool): Include the execution plan for the query. - profile (bool): Most thorough level of query analysis. Requires emit_results=False. + emit_results: Include the query result in the response. Using emit_results=False is required for advanced query analysis features. + timeout: Query timeout in milliseconds. Can be used to override the default timeout when analysing queries. Requires emit_results=False. + include_translated_query: Include the internal representation of the query. + include_plan: Include the execution plan for the query. + profile: Most thorough level of query analysis. Requires emit_results=False. """ emit_results: bool = True diff --git a/cognite/client/data_classes/data_modeling/extractor_extensions/v1.py b/cognite/client/data_classes/data_modeling/extractor_extensions/v1.py index 93a84f01b7..423b64bba0 100644 --- a/cognite/client/data_classes/data_modeling/extractor_extensions/v1.py +++ b/cognite/client/data_classes/data_modeling/extractor_extensions/v1.py @@ -27,11 +27,11 @@ class CogniteExtractorDataApply(_CogniteExtractorDataProperties, TypedNodeApply) It is used when data is written to CDF. Args: - space (str): The space where the node is located. 
- external_id (str): The external id of the Cognite extractor datum. - extracted_data (dict | None | Omitted): Unstructured information extracted from source system - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite extractor datum. + extracted_data: Unstructured information extracted from source system + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -53,14 +53,14 @@ class CogniteExtractorData(_CogniteExtractorDataProperties, TypedNode): It is used when data is read from CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite extractor datum. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - extracted_data (dict | None): Unstructured information extracted from source system - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite extractor datum. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + extracted_data: Unstructured information extracted from source system + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( @@ -109,26 +109,26 @@ class CogniteExtractorFileApply(_CogniteExtractorFileProperties, TypedNodeApply) It is used when data is written to CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite extractor file. 
- name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - assets (list[DirectRelationReference | tuple[str, str]] | None | Omitted): List of assets this file relates to - mime_type (str | None | Omitted): MIME type of the file - directory (str | None | Omitted): Contains the path elements from the source (for when the source system has a file system hierarchy or similar) - category (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to an instance of CogniteFileCategory representing the detected categorization/class for the file - extracted_data (dict | None | Omitted): Unstructured information extracted from source system - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. 
If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite extractor file. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. 
This identifier is not guaranteed to match the user identifiers in CDF + assets: List of assets this file relates to + mime_type: MIME type of the file + directory: Contains the path elements from the source (for when the source system has a file system hierarchy or similar) + category: Direct relation to an instance of CogniteFileCategory representing the detected categorization/class for the file + extracted_data: Unstructured information extracted from source system + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -180,31 +180,31 @@ class CogniteExtractorFile(_CogniteExtractorFileProperties, TypedNode): It is used when data is read from CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite extractor file. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. 
For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - assets (list[DirectRelationReference] | None): List of assets this file relates to - mime_type (str | None): MIME type of the file - directory (str | None): Contains the path elements from the source (for when the source system has a file system hierarchy or similar) - is_uploaded (bool | None): Whether the file content has been uploaded to Cognite Data Fusion - uploaded_time (datetime | None): Point in time when the file upload was completed and the file was made available - category (DirectRelationReference | None): Direct relation to an instance of CogniteFileCategory representing the detected categorization/class for the file - extracted_data (dict | None): Unstructured information extracted from source system - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite extractor file. + version: DMS version. 
+ last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + assets: List of assets this file relates to + mime_type: MIME type of the file + directory: Contains the path elements from the source (for when the source system has a file system hierarchy or similar) + is_uploaded: Whether the file content has been uploaded to Cognite Data Fusion + uploaded_time: Point in time when the file upload was completed and the file was made available + category: Direct relation to an instance of CogniteFileCategory representing the detected categorization/class for the file + extracted_data: Unstructured information extracted from source system + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ is_uploaded = PropertyOptions("isUploaded") @@ -307,28 +307,28 @@ class CogniteExtractorTimeSeriesApply(_CogniteExtractorTimeSeriesProperties, Typ It is used when data is written to CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite extractor time series. - is_step (bool): Defines whether the time series is a step series or not. - time_series_type (Literal['numeric', 'string']): Defines data type of the data points. - name (str | None | Omitted): Name of the instance - description (str | None | Omitted): Description of the instance - tags (list[str] | None | Omitted): Text based labels for generic use, limited to 1000 - aliases (list[str] | None | Omitted): Alternative names for the node - source_id (str | None | Omitted): Identifier from the source system - source_context (str | None | Omitted): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation to a source system - source_created_time (datetime | None | Omitted): When the instance was created in source system (if available) - source_updated_time (datetime | None | Omitted): When the instance was last updated in the source system (if available) - source_created_user (str | None | Omitted): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None | Omitted): User identifier from the source system on who last updated the source data. 
This identifier is not guaranteed to match the user identifiers in CDF - source_unit (str | None | Omitted): Unit as specified in the source system - unit (DirectRelationReference | tuple[str, str] | None | Omitted): direct relation to the unit of the time series - assets (list[DirectRelationReference | tuple[str, str]] | None | Omitted): The asset field. - equipment (list[DirectRelationReference | tuple[str, str]] | None | Omitted): The equipment field. - extracted_data (dict | None | Omitted): Unstructured information extracted from source system - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - type (DirectRelationReference | tuple[str, str] | None | Omitted): Direct relation pointing to the type node. + space: The space where the node is located. + external_id: The external id of the Cognite extractor time series. + is_step: Defines whether the time series is a step series or not. + time_series_type: Defines data type of the data points. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. 
+ source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_unit: Unit as specified in the source system + unit: direct relation to the unit of the time series + assets: The asset field. + equipment: The equipment field. + extracted_data: Unstructured information extracted from source system + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the node (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + type: Direct relation pointing to the type node. """ def __init__( @@ -384,31 +384,31 @@ class CogniteExtractorTimeSeries(_CogniteExtractorTimeSeriesProperties, TypedNod It is used when data is read from CDF. Args: - space (str): The space where the node is located. - external_id (str): The external id of the Cognite extractor time series. - version (int): DMS version. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- is_step (bool): Defines whether the time series is a step series or not. - time_series_type (Literal['numeric', 'string']): Defines data type of the data points. - name (str | None): Name of the instance - description (str | None): Description of the instance - tags (list[str] | None): Text based labels for generic use, limited to 1000 - aliases (list[str] | None): Alternative names for the node - source_id (str | None): Identifier from the source system - source_context (str | None): Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. - source (DirectRelationReference | None): Direct relation to a source system - source_created_time (datetime | None): When the instance was created in source system (if available) - source_updated_time (datetime | None): When the instance was last updated in the source system (if available) - source_created_user (str | None): User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_updated_user (str | None): User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF - source_unit (str | None): Unit as specified in the source system - unit (DirectRelationReference | None): direct relation to the unit of the time series - assets (list[DirectRelationReference] | None): The asset field. - equipment (list[DirectRelationReference] | None): The equipment field. - extracted_data (dict | None): Unstructured information extracted from source system - type (DirectRelationReference | None): Direct relation pointing to the type node. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. 
Note that deleted instances are filtered out of query results, but present in sync results + space: The space where the node is located. + external_id: The external id of the Cognite extractor time series. + version: DMS version. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + is_step: Defines whether the time series is a step series or not. + time_series_type: Defines data type of the data points. + name: Name of the instance + description: Description of the instance + tags: Text based labels for generic use, limited to 1000 + aliases: Alternative names for the node + source_id: Identifier from the source system + source_context: Context of the source id. For systems where the sourceId is globally unique, the sourceContext is expected to not be set. + source: Direct relation to a source system + source_created_time: When the instance was created in source system (if available) + source_updated_time: When the instance was last updated in the source system (if available) + source_created_user: User identifier from the source system on who created the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_updated_user: User identifier from the source system on who last updated the source data. This identifier is not guaranteed to match the user identifiers in CDF + source_unit: Unit as specified in the source system + unit: direct relation to the unit of the time series + assets: The asset field. + equipment: The equipment field. + extracted_data: Unstructured information extracted from source system + type: Direct relation pointing to the type node. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results """ def __init__( diff --git a/cognite/client/data_classes/data_modeling/instances.py b/cognite/client/data_classes/data_modeling/instances.py index ac12b7b173..93b85ab799 100644 --- a/cognite/client/data_classes/data_modeling/instances.py +++ b/cognite/client/data_classes/data_modeling/instances.py @@ -105,8 +105,8 @@ class NodeOrEdgeData(CogniteResource): """This represents the data values of a node or edge. Args: - source (ContainerId | ViewId): The container or view the node or edge property is in - properties (Mapping[str, PropertyValue]): The properties of the node or edge. + source: The container or view the node or edge property is in + properties: The properties of the node or edge. """ source: ContainerId | ViewId @@ -147,9 +147,9 @@ class InstanceCore(DataModelingResource, ABC): """A node or edge Args: - space (str): The workspace for the instance, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the instance. - instance_type (Literal['node', 'edge']): The type of instance. + space: The workspace for the instance, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the instance. + instance_type: The type of instance. """ def __init__(self, space: str, external_id: str, instance_type: Literal["node", "edge"]) -> None: @@ -169,11 +169,11 @@ class InstanceApply(WritableInstanceCore[T_CogniteResource], ABC): """A node or edge. This is the write version of the instance. Args: - space (str): The workspace for the instance, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the instance. - instance_type (Literal['node', 'edge']): The type of instance. 
- existing_version (int | None): Fail the ingestion request if the instance's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the instance (for the specified container or instance). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the instance already exists. If skipOnVersionConflict is set on the ingestion request, then the instance will be skipped instead of failing the ingestion request.
- sources (list[NodeOrEdgeData] | None): List of source properties to write. The properties are from the instance and/or container the container(s) making up this node.
+ space: The workspace for the instance, a unique identifier for the space.
+ external_id: Combined with the space is the unique identifier of the instance.
+ instance_type: The type of instance.
+ existing_version: Fail the ingestion request if the instance's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the instance (for the specified container or instance). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the instance already exists. If skipOnVersionConflict is set on the ingestion request, then the instance will be skipped instead of failing the ingestion request.
+ sources: List of source properties to write. The properties are from the instance and/or container(s) making up this node.
"""
def __init__(
@@ -323,14 +323,14 @@ class Instance(WritableInstanceCore[T_CogniteResource], ABC):
"""A node or edge. This is the read version of the instance.
Args:
- space (str): The workspace for the instance, a unique identifier for the space.
- external_id (str): Combined with the space is the unique identifier of the instance.
- version (int): Current version of the instance.
- last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - instance_type (Literal['node', 'edge']): The type of instance. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results - properties (Properties | None): Properties of the instance. + space: The workspace for the instance, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the instance. + version: Current version of the instance. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + instance_type: The type of instance. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + properties: Properties of the instance. """ def __init__( @@ -431,14 +431,14 @@ def to_pandas( # type: ignore [override] """Convert the instance into a pandas DataFrame. Args: - ignore (list[str] | None): List of row keys to skip when converting to a data frame. Is applied before expansions. - camel_case (bool): Convert attribute names to camel case (e.g. `externalId` instead of `external_id`). Does not affect properties if expanded. 
- convert_timestamps (bool): Convert known attributes storing CDF timestamps (milliseconds since epoch) to datetime. Does not affect properties. - expand_properties (bool): Expand the properties into separate rows. - remove_property_prefix (bool): Attempt to remove the view ID prefix from row names of expanded properties (in index). Requires data to be from a single view and that all property names do not conflict with base properties (e.g. 'space' or 'type'). In such cases, a warning is issued and the prefix is kept. + ignore: List of row keys to skip when converting to a data frame. Is applied before expansions. + camel_case: Convert attribute names to camel case (e.g. `externalId` instead of `external_id`). Does not affect properties if expanded. + convert_timestamps: Convert known attributes storing CDF timestamps (milliseconds since epoch) to datetime. Does not affect properties. + expand_properties: Expand the properties into separate rows. + remove_property_prefix: Attempt to remove the view ID prefix from row names of expanded properties (in index). Requires data to be from a single view and that all property names do not conflict with base properties (e.g. 'space' or 'type'). In such cases, a warning is issued and the prefix is kept. Returns: - pd.DataFrame: The dataframe. + The dataframe. """ df = super().to_pandas( expand_metadata=False, ignore=ignore, camel_case=camel_case, convert_timestamps=convert_timestamps @@ -484,13 +484,13 @@ class InstanceApplyResult(InstanceCore, ABC): """A node or edge. This represents the update on the instance. Args: - instance_type (Literal['node', 'edge']): The type of instance. - space (str): The workspace for the instance, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the instance. - version (int): DMS version of the instance. - was_modified (bool): Whether the instance was modified by the ingestion. 
- last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + instance_type: The type of instance. + space: The workspace for the instance, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the instance. + version: DMS version of the instance. + was_modified: Whether the instance was modified by the ingestion. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. """ def __init__( @@ -514,8 +514,8 @@ class InstanceAggregationResult(DataModelingResource): """Represents instances aggregation results. Args: - aggregates (list[AggregatedNumberedValue]): List of aggregated values. - group (dict[str, str | int | float | bool]): The grouping used for the aggregation. + aggregates: List of aggregated values. + group: The grouping used for the aggregation. """ def __init__(self, aggregates: list[AggregatedNumberedValue], group: dict[str, str | int | float | bool]) -> None: @@ -528,10 +528,10 @@ def _load(cls, resource: dict) -> Self: Loads an instance aggregation result from a json string or dictionary. Args: - resource (dict): No description. + resource: No description. Returns: - Self: An instance aggregation result. + An instance aggregation result. """ return cls( aggregates=[AggregatedNumberedValue.load(agg) for agg in resource["aggregates"]], @@ -543,10 +543,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: Dumps the aggregation results to a dictionary. Args: - camel_case (bool): Whether to convert the keys to camel case. 
+ camel_case: Whether to convert the keys to camel case. Returns: - dict[str, Any]: A dictionary with the instance results. + A dictionary with the instance results. """ return { @@ -650,11 +650,11 @@ class NodeApply(InstanceApply["NodeApply"]): """A node. This is the write version of the node. Args: - space (str): The workspace for the node, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the node. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - sources (list[NodeOrEdgeData] | None): List of source properties to write. The properties are from the node and/or container the container(s) making up this node. - type (DirectRelationReference | tuple[str, str] | None): Direct relation pointing to the type node. + space: The workspace for the node, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the node. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or node). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + sources: List of source properties to write. 
The properties are from the node and/or container the container(s) making up this node. + type: Direct relation pointing to the type node. """ def __init__( @@ -696,14 +696,14 @@ class Node(Instance[NodeApply]): """A node. This is the read version of the node. Args: - space (str): The workspace for the node, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the node. - version (int): Current version of the node. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results - properties (Properties | None): Properties of the node. - type (DirectRelationReference | tuple[str, str] | None): Direct relation pointing to the type node. + space: The workspace for the node, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the node. + version: Current version of the node. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + properties: Properties of the node. 
+ type: Direct relation pointing to the type node. """ def __init__( @@ -731,7 +731,7 @@ def as_apply(self) -> NodeApply: (CogniteAsset), or ``isUploaded`` (CogniteFile). Returns: - NodeApply: A write node, NodeApply, with all properties (even read-only) copied over. + A write node, NodeApply, with all properties (even read-only) copied over. """ return NodeApply( @@ -775,12 +775,12 @@ class NodeApplyResult(InstanceApplyResult): """A node. This represents the update on the node. Args: - space (str): The workspace for the node, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the node. - version (int): Current version of the node. - was_modified (bool): Whether the node was modified by the ingestion. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + space: The workspace for the node, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the node. + version: Current version of the node. + was_modified: Whether the node was modified by the ingestion. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. """ def __init__( @@ -815,13 +815,13 @@ class EdgeApply(InstanceApply["EdgeApply"]): """An Edge. This is the write version of the edge. Args: - space (str): The workspace for the edge, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the edge. - type (DirectRelationReference | tuple[str, str]): The type of edge. 
- start_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - existing_version (int | None): Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. - sources (list[NodeOrEdgeData] | None): List of source properties to write. The properties are from the edge and/or container the container(s) making up this node. + space: The workspace for the edge, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the edge. + type: The type of edge. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. The reference consists of a space and an external-id. + existing_version: Fail the ingestion request if the node's version is greater than or equal to this value. If no existingVersion is specified, the ingestion will always overwrite any existing data for the edge (for the specified container or edge). If existingVersion is set to 0, the upsert will behave as an insert, so it will fail the bulk if the item already exists. If skipOnVersionConflict is set on the ingestion request, then the item will be skipped instead of failing the ingestion request. + sources: List of source properties to write. The properties are from the edge and/or container the container(s) making up this node. 
""" def __init__( @@ -877,16 +877,16 @@ class Edge(Instance[EdgeApply]): """An Edge. This is the read version of the edge. Args: - space (str): The workspace for the edge, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the edge. - version (int): Current version of the edge. - type (DirectRelationReference | tuple[str, str]): The type of edge. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - start_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - end_node (DirectRelationReference | tuple[str, str]): Reference to the direct relation. The reference consists of a space and an external-id. - deleted_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results - properties (Properties | None): No description. + space: The workspace for the edge, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the edge. + version: Current version of the edge. + type: The type of edge. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + start_node: Reference to the direct relation. The reference consists of a space and an external-id. + end_node: Reference to the direct relation. 
The reference consists of a space and an external-id. + deleted_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Timestamp when the instance was soft deleted. Note that deleted instances are filtered out of query results, but present in sync results + properties: No description. """ def __init__( @@ -917,7 +917,7 @@ def as_apply(self) -> EdgeApply: edge will fail on ingestion. Returns: - EdgeApply: A write edge, EdgeApply, with all properties (even read-only) copied over. + A write edge, EdgeApply, with all properties (even read-only) copied over. """ return EdgeApply( @@ -969,12 +969,12 @@ class EdgeApplyResult(InstanceApplyResult): """An Edge. This represents the update on the edge. Args: - space (str): The workspace for the edge, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the edge. - version (int): Current version of the edge. - was_modified (bool): Whether the edge was modified by the ingestion. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + space: The workspace for the edge, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the edge. + version: Current version of the edge. + was_modified: Whether the edge was modified by the ingestion. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
""" def __init__( @@ -1013,7 +1013,7 @@ def as_ids(self) -> list[NodeId]: Convert the list of nodes to a list of node ids. Returns: - list[NodeId]: A list of node ids. + A list of node ids. """ return [result.as_id() for result in self] @@ -1026,7 +1026,7 @@ def as_ids(self) -> list[NodeId]: Convert the list of nodes to a list of node ids. Returns: - list[NodeId]: A list of node ids. + A list of node ids. """ return [node.as_id() for node in self] @@ -1065,11 +1065,11 @@ def get( # type: ignore [override] """Get an instance from this list by instance ID. Args: - instance_id (InstanceId | tuple[str, str] | None): The instance ID to get. A tuple on the form (space, external_id) is also accepted. - external_id (str | None): The external ID of the instance to return. Will raise ValueError when ambiguous (in presence of multiple spaces). + instance_id: The instance ID to get. A tuple on the form (space, external_id) is also accepted. + external_id: The external ID of the instance to return. Will raise ValueError when ambiguous (in presence of multiple spaces). Returns: - T_Instance | None: The requested instance if present, else None + The requested instance if present, else None """ if not exactly_one_is_not_none(instance_id, external_id): raise ValueError( @@ -1106,13 +1106,13 @@ def to_pandas( # type: ignore [override] keys in the metadata that already exist in the DataFrame, then an error will be raised by pandas during joining. Args: - camel_case (bool): Convert column names to camel case (e.g. `externalId` instead of `external_id`). Does not apply to properties. - convert_timestamps (bool): Convert known columns storing CDF timestamps (milliseconds since epoch) to datetime. Does not affect properties. - expand_properties (bool): Expand the properties into separate columns. - remove_property_prefix (bool): Attempt to remove the view ID prefix from columns names of expanded properties. 
Requires data to be from a single view and that all property names do not conflict with base properties (e.g. 'space' or 'type'). In such cases, a warning is issued and the prefix is kept. + camel_case: Convert column names to camel case (e.g. `externalId` instead of `external_id`). Does not apply to properties. + convert_timestamps: Convert known columns storing CDF timestamps (milliseconds since epoch) to datetime. Does not affect properties. + expand_properties: Expand the properties into separate columns. + remove_property_prefix: Attempt to remove the view ID prefix from column names of expanded properties. Requires data to be from a single view and that all property names do not conflict with base properties (e.g. 'space' or 'type'). In such cases, a warning is issued and the prefix is kept. Returns: - pd.DataFrame: The Cognite resource as a dataframe. + The Cognite resource as a dataframe. """ df = super().to_pandas(camel_case=camel_case, expand_metadata=False, convert_timestamps=convert_timestamps) if not expand_properties or "properties" not in df.columns: @@ -1168,7 +1168,7 @@ def as_ids(self) -> list[NodeId]: Convert the list of nodes to a list of node ids. Returns: - list[NodeId]: A list of node ids. + A list of node ids. """ return [node.as_id() for node in self] @@ -1231,7 +1231,7 @@ def as_ids(self) -> list[EdgeId]: Convert the list of edges to a list of edge ids. Returns: - list[EdgeId]: A list of edge ids. + A list of edge ids. """ return [edge.as_id() for edge in self] @@ -1244,7 +1244,7 @@ def as_ids(self) -> list[EdgeId]: Convert the list of edges to a list of edge ids. Returns: - list[EdgeId]: A list of edge ids. + A list of edge ids. """ return [edge.as_id() for edge in self] @@ -1270,7 +1270,7 @@ def as_ids(self) -> list[EdgeId]: Convert the list of edges to a list of edge ids. Returns: - list[EdgeId]: A list of edge ids. + A list of edge ids.
""" return [edge.as_id() for edge in self] @@ -1332,8 +1332,8 @@ class InstancesApply: This represents the write request of an instance query Args: - nodes (NodeApplyList): A list of nodes. - edges (EdgeApplyList): A list of edges. + nodes: A list of nodes. + edges: A list of edges. """ nodes: NodeApplyList @@ -1355,8 +1355,8 @@ class InstancesResult(Generic[T_Node, T_Edge]): """This represents the read result of an instance query Args: - nodes (NodeList): A list of nodes. - edges (EdgeList): A list of edges. + nodes: A list of nodes. + edges: A list of edges. """ @@ -1369,8 +1369,8 @@ class InstancesApplyResult: """This represents the write result of an instance query Args: - nodes (NodeApplyResultList): A list of nodes. - edges (EdgeApplyResultList): A list of edges. + nodes: A list of nodes. + edges: A list of edges. """ @@ -1383,8 +1383,8 @@ class InstancesDeleteResult: """This represents the delete result of an instance query Args: - nodes (list[NodeId]): A list of node ids. - edges (list[EdgeId]): A list of edge ids. + nodes: A list of node ids. + edges: A list of edge ids. """ @@ -1558,7 +1558,7 @@ class PropertyOptions: compared to the name in the Python class. Args: - identifier (str | None): The name of the property in the Data Model. Defaults to the name of the property in the Python class. + identifier: The name of the property in the Data Model. Defaults to the name of the property in the Python class. """ def __init__(self, identifier: str | None = None) -> None: diff --git a/cognite/client/data_classes/data_modeling/query.py b/cognite/client/data_classes/data_modeling/query.py index 6d4a66a686..e905c0f3f0 100644 --- a/cognite/client/data_classes/data_modeling/query.py +++ b/cognite/client/data_classes/data_modeling/query.py @@ -185,10 +185,10 @@ class Query(QueryBase["ResultSetExpression", Select]): r"""Query allows you to do advanced queries on the data model. 
Args: - with_ (Mapping[str, ResultSetExpression]): A dictionary of result set expressions to use in the query. The keys are used to reference the result set expressions in the select and parameters. - select (Mapping[str, Select]): A dictionary of select expressions to use in the query. The keys must match the keys in the with\_ dictionary. The select expressions define which properties to include in the result set. - parameters (Mapping[str, PropertyValue] | None): Values in filters can be parameterised. Parameters are provided as part of the query object, and referenced in the filter itself. - cursors (Mapping[str, str | None] | None): A dictionary of cursors to use in the query. These allow for pagination. + with_: A dictionary of result set expressions to use in the query. The keys are used to reference the result set expressions in the select and parameters. + select: A dictionary of select expressions to use in the query. The keys must match the keys in the with\_ dictionary. The select expressions define which properties to include in the result set. + parameters: Values in filters can be parameterised. Parameters are provided as part of the query object, and referenced in the filter itself. + cursors: A dictionary of cursors to use in the query. These allow for pagination. """ @classmethod @@ -201,11 +201,11 @@ class QuerySync(QueryBase["ResultSetExpressionSync", SelectSync]): r"""Sync allows you to do subscribe to changes in instances. Args: - with_ (Mapping[str, ResultSetExpressionSync]): A dictionary of result set expressions to use in the query. The keys are used to reference the result set expressions in the select and parameters. - select (Mapping[str, SelectSync]): A dictionary of select expressions to use in the query. The keys must match the keys in the with\_ dictionary. The select expressions define which properties to include in the result set. - parameters (Mapping[str, PropertyValue] | None): Values in filters can be parameterised. 
Parameters are provided as part of the query object, and referenced in the filter itself. - cursors (Mapping[str, str | None] | None): A dictionary of cursors to use in the query. These allow for pagination. - allow_expired_cursors_and_accept_missed_deletes (bool): Sync cursors expire after 3 days because soft-deleted instances are cleaned up after this grace period, so a client using a cursor older than that risks missing deletes. If set to True, the API will allow the use of expired cursors. + with_: A dictionary of result set expressions to use in the query. The keys are used to reference the result set expressions in the select and parameters. + select: A dictionary of select expressions to use in the query. The keys must match the keys in the with\_ dictionary. The select expressions define which properties to include in the result set. + parameters: Values in filters can be parameterised. Parameters are provided as part of the query object, and referenced in the filter itself. + cursors: A dictionary of cursors to use in the query. These allow for pagination. + allow_expired_cursors_and_accept_missed_deletes: Sync cursors expire after 3 days because soft-deleted instances are cleaned up after this grace period, so a client using a cursor older than that risks missing deletes. If set to True, the API will allow the use of expired cursors. """ allow_expired_cursors_and_accept_missed_deletes: bool = False @@ -292,13 +292,13 @@ class NodeResultSetExpression(NodeOrEdgeResultSetExpression): """Describes how to query for nodes in the data model. Args: - from_ (str | None): Chain your result-expression based on this view. - filter (Filter | None): Filter the result set based on this filter. - sort (list[InstanceSort] | None): Sort the result set based on this list of sort criteria. - limit (int | None): Limit the result set to this number of instances. 
- through (list[str] | tuple[str, str, str] | PropertyId | None): Chain your result-expression through this container or view. The property must be a reference to a direct relation property. `from_` must be defined. The tuple must be on the form (space, container, property) or (space, view/version, property). - direction (Literal['outwards', 'inwards']): The direction to use when traversing direct relations. Only applicable when through is specified. - chain_to (Literal['destination', 'source']): Control which side of the edge to chain to. The chain_to option is only applicable if the result rexpression referenced in `from` contains edges. `source` will chain to start if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e `direction=inwards`, it will chain to end. `destination` (default) will chain to end if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e, `direction=inwards`, it will chain to start. + from_: Chain your result-expression based on this view. + filter: Filter the result set based on this filter. + sort: Sort the result set based on this list of sort criteria. + limit: Limit the result set to this number of instances. + through: Chain your result-expression through this container or view. The property must be a reference to a direct relation property. `from_` must be defined. The tuple must be on the form (space, container, property) or (space, view/version, property). + direction: The direction to use when traversing direct relations. Only applicable when through is specified. + chain_to: Control which side of the edge to chain to. The chain_to option is only applicable if the result expression referenced in `from` contains edges. `source` will chain to start if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e `direction=inwards`, it will chain to end.
`destination` (default) will chain to end if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e, `direction=inwards`, it will chain to start. """ through: PropertyId | None = None @@ -360,17 +360,17 @@ class EdgeResultSetExpression(NodeOrEdgeResultSetExpression): """Describes how to query for edges in the data model. Args: - from_ (str | None): Chain your result expression from this edge. - max_distance (int | None): The largest - max - number of levels to traverse. - direction (Literal['outwards', 'inwards']): The direction to use when traversing. - filter (Filter | None): Filter the result set based on this filter. - node_filter (Filter | None): Filter the result set based on this filter. - termination_filter (Filter | None): Filter the result set based on this filter. - limit_each (int | None): Limit the number of returned edges for each of the source nodes in the result set. The indicated uniform limit applies to the result set from the referenced from. limitEach only has meaning when you also specify maxDistance=1 and from. - sort (list[InstanceSort] | None): Sort the result set based on this list of sort criteria. - post_sort (list[InstanceSort] | None): Sort the result set based on this list of sort criteria. - limit (int | None): Limit the result set to this number of instances. - chain_to (Literal['destination', 'source']): Control which side of the edge to chain to. The chain_to option is only applicable if the result rexpression referenced in `from` contains edges. `source` will chain to start if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e `direction=inwards`, it will chain to end. `destination` (default) will chain to end if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e, `direction=inwards`, it will chain to start. + from_: Chain your result expression from this edge. 
+ max_distance: The largest - max - number of levels to traverse. + direction: The direction to use when traversing. + filter: Filter the result set based on this filter. + node_filter: Filter the result set based on this filter. + termination_filter: Filter the result set based on this filter. + limit_each: Limit the number of returned edges for each of the source nodes in the result set. The indicated uniform limit applies to the result set from the referenced from. limitEach only has meaning when you also specify maxDistance=1 and from. + sort: Sort the result set based on this list of sort criteria. + post_sort: Sort the result set based on this list of sort criteria. + limit: Limit the result set to this number of instances. + chain_to: Control which side of the edge to chain to. The chain_to option is only applicable if the result expression referenced in `from` contains edges. `source` will chain to start if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e `direction=inwards`, it will chain to end. `destination` (default) will chain to end if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e, `direction=inwards`, it will chain to start. """ max_distance: int | None = None @@ -480,15 +480,15 @@ class NodeResultSetExpressionSync(ResultSetExpressionSync): """Describes how to query for nodes in the data model. Args: - from_ (str | None): Chain your result-expression based on this view. - filter (Filter | None): Filter the result set based on this filter. - limit (int | None): Limit the result set to this number of instances. - through (list[str] | tuple[str, str, str] | PropertyId | None): Chain your result-expression through this container or view. The property must be a reference to a direct relation property. `from_` must be defined. The tuple must be on the form (space, container, property) or (space, view/version, property).
- direction (Literal['outwards', 'inwards']): The direction to use when traversing direct relations. Only applicable when through is specified. - chain_to (Literal['destination', 'source']): Control which side of the edge to chain to. The chain_to option is only applicable if the result rexpression referenced in `from` contains edges. `source` will chain to start if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e `direction=inwards`, it will chain to end. `destination` (default) will chain to end if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e, `direction=inwards`, it will chain to start. - skip_already_deleted (bool): If set to False, the API will return instances that have been soft deleted before sync was initiated. Soft deletes that happen after the sync is initiated and a cursor generated, are always included in the result. Soft deleted instances are identified by having deletedTime set. - sync_mode (Literal['one_phase', 'two_phase', 'no_backfill'] | None): Specify whether to sync instances in a single phase; in a backfill phase followed by live updates, or without any backfill. Only valid for sync operations. - backfill_sort (list[InstanceSort] | None): Sort the result set during the backfill phase of a two phase sync. Only valid with sync_mode = "two_phase". The sort must be backed by a cursorable index. + from_: Chain your result-expression based on this view. + filter: Filter the result set based on this filter. + limit: Limit the result set to this number of instances. + through: Chain your result-expression through this container or view. The property must be a reference to a direct relation property. `from_` must be defined. The tuple must be on the form (space, container, property) or (space, view/version, property). + direction: The direction to use when traversing direct relations. Only applicable when through is specified. 
+ chain_to: Control which side of the edge to chain to. The chain_to option is only applicable if the result expression referenced in `from` contains edges. `source` will chain to start if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e `direction=inwards`, it will chain to end. `destination` (default) will chain to end if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e, `direction=inwards`, it will chain to start. + skip_already_deleted: If set to False, the API will return instances that have been soft deleted before sync was initiated. Soft deletes that happen after the sync is initiated and a cursor generated, are always included in the result. Soft deleted instances are identified by having deletedTime set. + sync_mode: Specify whether to sync instances in a single phase; in a backfill phase followed by live updates, or without any backfill. Only valid for sync operations. + backfill_sort: Sort the result set during the backfill phase of a two phase sync. Only valid with sync_mode = "two_phase". The sort must be backed by a cursorable index. """ through: PropertyId | None = None @@ -567,17 +567,17 @@ class EdgeResultSetExpressionSync(ResultSetExpressionSync): """Describes how to query for edges in the data model. Args: - from_ (str | None): Chain your result expression from this edge. - max_distance (int | None): The largest - max - number of levels to traverse. - direction (Literal['outwards', 'inwards']): The direction to use when traversing. - filter (Filter | None): Filter the result set based on this filter. - node_filter (Filter | None): Filter the result set based on this filter. - termination_filter (Filter | None): Filter the result set based on this filter. - limit (int | None): Limit the result set to this number of instances. - chain_to (Literal['destination', 'source']): Control which side of the edge to chain to.
The chain_to option is only applicable if the result rexpression referenced in `from` contains edges. `source` will chain to start if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e `direction=inwards`, it will chain to end. `destination` (default) will chain to end if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e, `direction=inwards`, it will chain to start. - skip_already_deleted (bool): If set to False, the API will return instances that have been soft deleted before sync was initiated. Soft deletes that happen after the sync is initiated and a cursor generated, are always included in the result. Soft deleted instances are identified by having deletedTime set. - sync_mode (Literal['one_phase', 'two_phase', 'no_backfill'] | None): Specify whether to sync instances in a single phase; in a backfill phase followed by live updates, or without any backfill. Only valid for sync operations. - backfill_sort (list[InstanceSort] | None): Sort the result set during the backfill phase of a two phase sync. Only valid with sync_mode = "two_phase". The sort must be backed by a cursorable index. + from_: Chain your result expression from this edge. + max_distance: The largest - max - number of levels to traverse. + direction: The direction to use when traversing. + filter: Filter the result set based on this filter. + node_filter: Filter the result set based on this filter. + termination_filter: Filter the result set based on this filter. + limit: Limit the result set to this number of instances. + chain_to: Control which side of the edge to chain to. The chain_to option is only applicable if the result expression referenced in `from` contains edges. `source` will chain to start if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e `direction=inwards`, it will chain to end.
`destination` (default) will chain to end if you're following edges outwards i.e `direction=outwards`. If you're following edges inwards i.e, `direction=inwards`, it will chain to start. + skip_already_deleted: If set to False, the API will return instances that have been soft deleted before sync was initiated. Soft deletes that happen after the sync is initiated and a cursor generated, are always included in the result. Soft deleted instances are identified by having deletedTime set. + sync_mode: Specify whether to sync instances in a single phase; in a backfill phase followed by live updates, or without any backfill. Only valid for sync operations. + backfill_sort: Sort the result set during the backfill phase of a two phase sync. Only valid with sync_mode = "two_phase". The sort must be backed by a cursorable index. """ max_distance: int | None = None diff --git a/cognite/client/data_classes/data_modeling/spaces.py b/cognite/client/data_classes/data_modeling/spaces.py index 1f1adfd9b9..067370232f 100644 --- a/cognite/client/data_classes/data_modeling/spaces.py +++ b/cognite/client/data_classes/data_modeling/spaces.py @@ -19,9 +19,9 @@ class SpaceCore(WritableDataModelingResource["SpaceApply"], ABC): """A workspace for data models and instances. Args: - space (str): A unique identifier for the space. - description (str | None): Textual description of the space - name (str | None): Human readable name for the space. + space: A unique identifier for the space. + description: Textual description of the space + name: Human readable name for the space. """ def __init__(self, space: str, description: str | None, name: str | None) -> None: @@ -37,9 +37,9 @@ class SpaceApply(SpaceCore): """A workspace for data models and instances. This is the write version Args: - space (str): A unique identifier for the space. - description (str | None): Textual description of the space - name (str | None): Human readable name for the space. + space: A unique identifier for the space. 
+ description: Textual description of the space + name: Human readable name for the space. """ def __init__(self, space: str, description: str | None = None, name: str | None = None) -> None: @@ -63,12 +63,12 @@ class Space(SpaceCore): """A workspace for data models and instances. This is the read version. Args: - space (str): A unique identifier for the space. - is_global (bool): Whether the space is global or not. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - description (str | None): Textual description of the space - name (str | None): Human readable name for the space. + space: A unique identifier for the space. + is_global: Whether the space is global or not. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + description: Textual description of the space + name: Human readable name for the space. """ def __init__( @@ -115,7 +115,7 @@ def as_ids(self) -> list[str]: Converts all the spaces to a space id list. Returns: - list[str]: A list of space ids. + A list of space ids. """ return [item.space for item in self] @@ -131,10 +131,10 @@ def get(self, space: str) -> Space | None: # type: ignore [override] """Get a space object from this list by space ID. Args: - space (str): The space identifier to get. + space: The space identifier to get. Returns: - Space | None: The requested space if present, else None + The requested space if present, else None """ return self._space_to_item.get(space) @@ -151,7 +151,7 @@ def as_ids(self) -> list[str]: Converts all the spaces to a space id list. 
Returns: - list[str]: A list of space ids. + A list of space ids. """ return [item.space for item in self] @@ -160,7 +160,7 @@ def as_apply(self) -> SpaceApplyList: Converts all the spaces to a space apply list. Returns: - SpaceApplyList: A list of space applies. + A list of space applies. """ return SpaceApplyList([item.as_apply() for item in self]) diff --git a/cognite/client/data_classes/data_modeling/statistics.py b/cognite/client/data_classes/data_modeling/statistics.py index 68dd613161..563a0e7c27 100644 --- a/cognite/client/data_classes/data_modeling/statistics.py +++ b/cognite/client/data_classes/data_modeling/statistics.py @@ -12,14 +12,14 @@ class InstanceStatistics(CogniteResource): """Statistics for instances in the data modeling API. Attributes: - edges (int): Number of edges in the project. - soft_deleted_edges (int): Number of soft-deleted edges in the project. - nodes (int): Number of nodes in the project. - soft_deleted_nodes (int): Number of soft-deleted nodes in the project. - instances (int): Total number of instances in the project. - instances_limit (int): Maximum number of instances allowed in the project. - soft_deleted_instances (int): Total number of soft-deleted instances in the project. - soft_deleted_instances_limit (int): Maximum number of soft-deleted instances allowed in the project. + edges: Number of edges in the project. + soft_deleted_edges: Number of soft-deleted edges in the project. + nodes: Number of nodes in the project. + soft_deleted_nodes: Number of soft-deleted nodes in the project. + instances: Total number of instances in the project. + instances_limit: Maximum number of instances allowed in the project. + soft_deleted_instances: Total number of soft-deleted instances in the project. + soft_deleted_instances_limit: Maximum number of soft-deleted instances allowed in the project. """ edges: int @@ -50,8 +50,8 @@ class CountLimit(CogniteResource): """Usage and limits for a specific resource in the data modeling API. 
Attributes: - count (int): The current usage count for the resource. - limit (int): The maximum allowed limit for the resource. + count: The current usage count for the resource. + limit: The maximum allowed limit for the resource. """ @@ -68,14 +68,14 @@ class SpaceStatistics(CogniteResource): """Statistics for a space in the data modeling API. Attributes: - space (str): The space name - containers (int): Number of containers in the space. - views (int): Number of views in the space. - data_models (int): Number of data models in the space. - nodes (int): Number of nodes in the space. - edges (int): Number of edges in the space. - soft_deleted_nodes (int): Number of soft-deleted nodes in the space. - soft_deleted_edges (int): Number of soft-deleted edges in the space. + space: The space name + containers: Number of containers in the space. + views: Number of views in the space. + data_models: Number of data models in the space. + nodes: Number of nodes in the space. + edges: Number of edges in the space. + soft_deleted_nodes: Number of soft-deleted nodes in the space. + soft_deleted_edges: Number of soft-deleted edges in the space. """ @@ -107,15 +107,15 @@ class ProjectStatistics(CogniteResource): """Statistics for a project in the data modeling API. 
Attributes: - spaces (CountLimit): Usage and limits for spaces in the project - containers (CountLimit): Usage and limits for containers in the project - views (CountLimit): Usage and limits for views including all versions in the project - data_models (CountLimit): Usage and limits for data models including all versions in the project - container_properties (CountLimit): Usage and limits for sum of container properties in the project - instances (InstanceStatistics): Usage and limits for number of instances in the project - concurrent_read_limit (int): Maximum number of concurrent read operations allowed in the project - concurrent_write_limit (int): Maximum number of concurrent write operations allowed in the project - concurrent_delete_limit (int): Maximum number of concurrent delete operations allowed in the project + spaces: Usage and limits for spaces in the project + containers: Usage and limits for containers in the project + views: Usage and limits for views including all versions in the project + data_models: Usage and limits for data models including all versions in the project + container_properties: Usage and limits for sum of container properties in the project + instances: Usage and limits for number of instances in the project + concurrent_read_limit: Maximum number of concurrent read operations allowed in the project + concurrent_write_limit: Maximum number of concurrent write operations allowed in the project + concurrent_delete_limit: Maximum number of concurrent delete operations allowed in the project """ spaces: CountLimit diff --git a/cognite/client/data_classes/data_modeling/views.py b/cognite/client/data_classes/data_modeling/views.py index 2a72c9c590..c63bf668bb 100644 --- a/cognite/client/data_classes/data_modeling/views.py +++ b/cognite/client/data_classes/data_modeling/views.py @@ -67,14 +67,14 @@ class ViewApply(ViewCore): """A group of properties. Write only version. 
Args: - space (str): The workspace for the view, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the view. - version (str): DMS version. - description (str | None): Textual description of the view - name (str | None): Human readable name for the view. - filter (Filter | None): A filter Domain Specific Language (DSL) used to create advanced filter queries. - implements (list[ViewId] | None): References to the views from where this view will inherit properties and edges. - properties (dict[str, ViewPropertyApply] | None): No description. + space: The workspace for the view, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the view. + version: DMS version. + description: Textual description of the view + name: Human readable name for the view. + filter: A filter Domain Specific Language (DSL) used to create advanced filter queries. + implements: References to the views from where this view will inherit properties and edges. + properties: No description. .. note:: The order of elements (i.e., `ViewId`) in :code:`implements` matters, as it indicates priority on how to handle @@ -135,7 +135,7 @@ def referenced_containers(self) -> set[ContainerId]: """Helper function to get the set of containers referenced by this view. Returns: - set[ContainerId]: The set of containers referenced by this view. + The set of containers referenced by this view. """ referenced_containers = set() for prop in (self.properties or {}).values(): @@ -148,19 +148,19 @@ class View(ViewCore): """A group of properties. Read only version. Args: - space (str): The workspace for the view, a unique identifier for the space. - external_id (str): Combined with the space is the unique identifier of the view. - version (str): DMS version. - properties (dict[str, ViewProperty]): View with included properties and expected edges, indexed by a unique space-local identifier. 
- last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - description (str | None): Textual description of the view - name (str | None): Human readable name for the view. - filter (Filter | None): A filter Domain Specific Language (DSL) used to create advanced filter queries. - implements (list[ViewId] | None): References to the views from where this view will inherit properties and edges. - writable (bool): Whether the view supports write operations. - used_for (Literal['node', 'edge', 'all']): Does this view apply to nodes, edges or both. - is_global (bool): Whether this is a global view. + space: The workspace for the view, a unique identifier for the space. + external_id: Combined with the space is the unique identifier of the view. + version: DMS version. + properties: View with included properties and expected edges, indexed by a unique space-local identifier. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + description: Textual description of the view + name: Human readable name for the view. + filter: A filter Domain Specific Language (DSL) used to create advanced filter queries. + implements: References to the views from where this view will inherit properties and edges. + writable: Whether the view supports write operations. + used_for: Does this view apply to nodes, edges or both. + is_global: Whether this is a global view. """ def __init__( @@ -224,7 +224,7 @@ def as_apply(self) -> ViewApply: """Convert to a view applies. Returns: - ViewApply: The view apply. + The view apply. 
""" properties: dict[str, ViewPropertyApply] | None = None if self.properties: @@ -263,7 +263,7 @@ def referenced_containers(self) -> set[ContainerId]: """Helper function to get the set of containers referenced by this view. Returns: - set[ContainerId]: The set of containers referenced by this view. + The set of containers referenced by this view. """ referenced_containers = set() for prop in self.properties.values(): @@ -279,7 +279,7 @@ def as_ids(self) -> list[ViewId]: """Returns the list of ViewIds Returns: - list[ViewId]: The list of ViewIds + The list of ViewIds """ return [v.as_id() for v in self] @@ -287,7 +287,7 @@ def referenced_containers(self) -> set[ContainerId]: """Helper function to get the set of containers referenced by this view. Returns: - set[ContainerId]: The set of containers referenced by this view. + The set of containers referenced by this view. """ referenced_containers = set() for view in self: @@ -302,7 +302,7 @@ def as_apply(self) -> ViewApplyList: """Convert to a view an apply list. Returns: - ViewApplyList: The view apply list. + The view apply list. """ return ViewApplyList([v.as_apply() for v in self]) @@ -310,7 +310,7 @@ def as_ids(self) -> list[ViewId]: """Returns the list of ViewIds Returns: - list[ViewId]: The list of ViewIds + The list of ViewIds """ return [v.as_id() for v in self] @@ -321,7 +321,7 @@ def referenced_containers(self) -> set[ContainerId]: """Helper function to get the set of containers referenced by this view. Returns: - set[ContainerId]: The set of containers referenced by this view. + The set of containers referenced by this view. """ referenced_containers = set() for view in self: @@ -333,10 +333,10 @@ class ViewFilter(CogniteFilter): """Represent the filer arguments for the list endpoint. Args: - space (str | None): The space to query - include_inherited_properties (bool): Whether to include properties inherited from views this view implements. - all_versions (bool): Whether to return all versions. 
If false, only the newest version is returned, which is determined based on the 'createdTime' field. - include_global (bool): Whether to include global views. + space: The space to query + include_inherited_properties: Whether to include properties inherited from views this view implements. + all_versions: Whether to return all versions. If false, only the newest version is returned, which is determined based on the 'createdTime' field. + include_global: Whether to include global views. """ def __init__( @@ -511,14 +511,14 @@ class EdgeConnection(ConnectionDefinition, ABC): A connection has a max distance of one hop. Args: - type (DirectRelationReference): Reference to the node pointed to by the direct relation. The reference + type: Reference to the node pointed to by the direct relation. The reference consists of a space and an external-id. - source (ViewId): The target node(s) of this connection can be read through the view specified in 'source'. - name (str | None): Readable property name. - description (str | None): Description of the content and suggested use for this property. - edge_source (ViewId | None): The edge(s) of this connection can be read through the view specified in + source: The target node(s) of this connection can be read through the view specified in 'source'. + name: Readable property name. + description: Description of the content and suggested use for this property. + edge_source: The edge(s) of this connection can be read through the view specified in 'edgeSource'. - direction (Literal["outwards", "inwards"]): The direction of the edge. The outward direction is used to + direction: The direction of the edge. The outward direction is used to indicate that the edge points from the source to the target. The inward direction is used to indicate that the edge points from the target to the source. """ @@ -611,11 +611,11 @@ class ReverseDirectRelation(ConnectionDefinition, ABC): It is called 'ReverseDirectRelationConnection' in the API spec. 
Args: - source (ViewId): The node(s) containing the direct relation property can be read through + source: The node(s) containing the direct relation property can be read through the view specified in 'source'. - through (PropertyId): The view or container of the node containing the direct relation property. - name (str | None): Readable property name. - description (str | None): Description of the content and suggested use for this property. + through: The view or container of the node containing the direct relation property. + name: Readable property name. + description: Description of the content and suggested use for this property. """ @@ -718,14 +718,14 @@ class EdgeConnectionApply(ConnectionDefinitionApply, ABC): It is called 'EdgeConnection' in the API spec. Args: - type (DirectRelationReference): Reference to the node pointed to by the direct relation. The reference + type: Reference to the node pointed to by the direct relation. The reference consists of a space and an external-id. - source (ViewId): The target node(s) of this connection can be read through the view specified in 'source'. - name (str | None): Readable property name. - description (str | None): Description of the content and suggested use for this property. - edge_source (ViewId | None): The edge(s) of this connection can be read through the view specified in + source: The target node(s) of this connection can be read through the view specified in 'source'. + name: Readable property name. + description: Description of the content and suggested use for this property. + edge_source: The edge(s) of this connection can be read through the view specified in 'edgeSource'. - direction (Literal["outwards", "inwards"]): The direction of the edge. The outward direction is used to + direction: The direction of the edge. The outward direction is used to indicate that the edge points from the source to the target. 
The inward direction is used to indicate that the edge points from the target to the source. """ @@ -804,11 +804,11 @@ class ReverseDirectRelationApply(ConnectionDefinitionApply, ABC): It is called 'ReverseDirectRelationConnection' in the API spec. Args: - source (ViewId): The node(s) containing the direct relation property can be read through + source: The node(s) containing the direct relation property can be read through the view specified in 'source'. - through (PropertyId): The view or container of the node containing the direct relation property. - name (str | None): Readable property name. - description (str | None): Description of the content and suggested use for this property. + through: The view or container of the node containing the direct relation property. + name: Readable property name. + description: Description of the content and suggested use for this property. """ diff --git a/cognite/client/data_classes/data_sets.py b/cognite/client/data_classes/data_sets.py index f22e50c059..a5e2b57228 100644 --- a/cognite/client/data_classes/data_sets.py +++ b/cognite/client/data_classes/data_sets.py @@ -27,11 +27,11 @@ class DataSetCore(WriteableCogniteResource["DataSetWrite"]): This is the read version of the DataSet, which is used when retrieving from CDF. Args: - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - name (str | None): The name of the data set. - description (str | None): The description of the data set. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. - write_protected (bool | None): To write data to a write-protected data set, you need to be a member of a group that has the "datasets:owner" action for the data set. 
To learn more about write-protected data sets, follow this [guide](/cdf/data_governance/concepts/datasets/#write-protection) + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the data set. + description: The description of the data set. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. + write_protected: To write data to a write-protected data set, you need to be a member of a group that has the "datasets:owner" action for the data set. To learn more about write-protected data sets, follow this [guide](/cdf/data_governance/concepts/datasets/#write-protection) """ def __init__( @@ -54,14 +54,14 @@ class DataSet(DataSetCore): This is the read version of the DataSet, which is used when retrieving from CDF. Args: - id (int): A server-generated ID for the object. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - write_protected (bool): To write data to a write-protected data set, you need to be a member of a group that has the "datasets:owner" action for the data set. To learn more about write-protected data sets, follow this [guide](/cdf/data_governance/concepts/datasets/#write-protection) - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - name (str | None): The name of the data set. - description (str | None): The description of the data set. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. 
+ id: A server-generated ID for the object. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + write_protected: To write data to a write-protected data set, you need to be a member of a group that has the "datasets:owner" action for the data set. To learn more about write-protected data sets, follow this [guide](/cdf/data_governance/concepts/datasets/#write-protection) + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the data set. + description: The description of the data set. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. """ def __init__( @@ -114,11 +114,11 @@ class DataSetWrite(DataSetCore): This is the read version of the DataSet, which is used when retrieving from CDF. Args: - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - name (str | None): The name of the data set. - description (str | None): The description of the data set. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. - write_protected (bool | None): To write data to a write-protected data set, you need to be a member of a group that has the "datasets:owner" action for the data set. To learn more about write-protected data sets, follow this [guide](/cdf/data_governance/concepts/datasets/#write-protection) + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the data set. 
+ description: The description of the data set. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. + write_protected: To write data to a write-protected data set, you need to be a member of a group that has the "datasets:owner" action for the data set. To learn more about write-protected data sets, follow this [guide](/cdf/data_governance/concepts/datasets/#write-protection) """ def __init__( @@ -156,11 +156,11 @@ class DataSetFilter(CogniteFilter): """Filter on data sets with strict matching. Args: - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - write_protected (bool | None): No description. + metadata: Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. + created_time: Range between two timestamps. + last_updated_time: Range between two timestamps. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + write_protected: No description. """ def __init__( @@ -190,8 +190,8 @@ class DataSetUpdate(CogniteUpdate): """Update applied to single data set Args: - id (int): A server-generated ID for the object. - external_id (str): The external ID provided by the client. Must be unique for the resource type. + id: A server-generated ID for the object. + external_id: The external ID provided by the client. 
Must be unique for the resource type. """ class _PrimitiveDataSetUpdate(CognitePrimitiveUpdate): diff --git a/cognite/client/data_classes/datapoints.py b/cognite/client/data_classes/datapoints.py index e6f6bea329..db19bd1c28 100644 --- a/cognite/client/data_classes/datapoints.py +++ b/cognite/client/data_classes/datapoints.py @@ -402,15 +402,15 @@ class LatestDatapointQuery: Pass either ID or external ID. Args: - id (Optional[int]): The internal ID of the time series to query. - external_id (Optional[str]): The external ID of the time series to query. - instance_id (Optional[NodeId]): The instance ID of the time series to query. - before (Union[None, int, str, datetime]): Get latest datapoint before this time. None means 'now'. - target_unit (str | None): The unit_external_id of the data points returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. - target_unit_system (str | None): The unit system of the data points returned. Cannot be used with target_unit. - include_status (bool): Also return the status code, an integer, for each datapoint in the response. - ignore_bad_datapoints (bool): Prevent data points with a bad status code to be returned. Default: True. - treat_uncertain_as_bad (bool): Treat uncertain status codes as bad. If false, treat uncertain as good. Default: True. + id: The internal ID of the time series to query. + external_id: The external ID of the time series to query. + instance_id: The instance ID of the time series to query. + before: Get latest datapoint before this time. None means 'now'. + target_unit: The unit_external_id of the data points returned. If the time series does not have a unit_external_id that can be converted to the target_unit, an error will be returned. Cannot be used with target_unit_system. + target_unit_system: The unit system of the data points returned. Cannot be used with target_unit. 
+ include_status: Also return the status code, an integer, for each datapoint in the response. + ignore_bad_datapoints: Prevent data points with a bad status code to be returned. Default: True. + treat_uncertain_as_bad: Treat uncertain status codes as bad. If false, treat uncertain as good. Default: True. """ id: InitVar[int | None] = None @@ -436,29 +436,29 @@ class Datapoint(CogniteResource): """An object representing a datapoint. Args: - timestamp (int): The data timestamp in milliseconds since the epoch (Jan 1, 1970). Can be negative to define a date before 1970. Minimum timestamp is 1900.01.01 00:00:00 UTC - value (str | float | None): The raw data value. Can be string or numeric. - average (float | None): The time-weighted average value in the aggregate interval. - max (float | None): The maximum value in the aggregate interval. - max_datapoint (MaxDatapoint | MaxDatapointWithStatus | None): Objects with the maximum values and their timestamps in the aggregate intervals, optionally including status codes and symbols. - min (float | None): The minimum value in the aggregate interval. - min_datapoint (MinDatapoint | MinDatapointWithStatus | None): Objects with the minimum values and their timestamps in the aggregate intervals, optionally including status codes and symbols. - count (int | None): The number of raw datapoints in the aggregate interval. - sum (float | None): The sum of the raw datapoints in the aggregate interval. - interpolation (float | None): The interpolated value at the beginning of the aggregate interval. - step_interpolation (float | None): The interpolated value at the beginning of the aggregate interval using stepwise interpretation. - continuous_variance (float | None): The variance of the interpolated underlying function. - discrete_variance (float | None): The variance of the datapoint values. - total_variation (float | None): The total variation of the interpolated underlying function. 
- count_bad (int | None): The number of raw datapoints with a bad status code, in the aggregate interval. - count_good (int | None): The number of raw datapoints with a good status code, in the aggregate interval. - count_uncertain (int | None): The number of raw datapoints with a uncertain status code, in the aggregate interval. - duration_bad (int | None): The duration the aggregate is defined and marked as bad (measured in milliseconds). - duration_good (int | None): The duration the aggregate is defined and marked as good (measured in milliseconds). - duration_uncertain (int | None): The duration the aggregate is defined and marked as uncertain (measured in milliseconds). - status_code (int | None): The status code for the raw datapoint. - status_symbol (str | None): The status symbol for the raw datapoint. - timezone (datetime.timezone | ZoneInfo | None): The timezone to use when displaying the datapoint. + timestamp: The data timestamp in milliseconds since the epoch (Jan 1, 1970). Can be negative to define a date before 1970. Minimum timestamp is 1900.01.01 00:00:00 UTC + value: The raw data value. Can be string or numeric. + average: The time-weighted average value in the aggregate interval. + max: The maximum value in the aggregate interval. + max_datapoint: Objects with the maximum values and their timestamps in the aggregate intervals, optionally including status codes and symbols. + min: The minimum value in the aggregate interval. + min_datapoint: Objects with the minimum values and their timestamps in the aggregate intervals, optionally including status codes and symbols. + count: The number of raw datapoints in the aggregate interval. + sum: The sum of the raw datapoints in the aggregate interval. + interpolation: The interpolated value at the beginning of the aggregate interval. + step_interpolation: The interpolated value at the beginning of the aggregate interval using stepwise interpretation. 
+ continuous_variance: The variance of the interpolated underlying function. + discrete_variance: The variance of the datapoint values. + total_variation: The total variation of the interpolated underlying function. + count_bad: The number of raw datapoints with a bad status code, in the aggregate interval. + count_good: The number of raw datapoints with a good status code, in the aggregate interval. + count_uncertain: The number of raw datapoints with an uncertain status code, in the aggregate interval. + duration_bad: The duration the aggregate is defined and marked as bad (measured in milliseconds). + duration_good: The duration the aggregate is defined and marked as good (measured in milliseconds). + duration_uncertain: The duration the aggregate is defined and marked as uncertain (measured in milliseconds). + status_code: The status code for the raw datapoint. + status_symbol: The status symbol for the raw datapoint. + timezone: The timezone to use when displaying the datapoint. """ def __init__( @@ -520,10 +520,10 @@ def to_pandas(self, camel_case: bool = False) -> pandas.DataFrame: # type: igno """Convert the datapoint into a pandas DataFrame. Args: - camel_case (bool): Convert column names to camel case (e.g. `stepInterpolation` instead of `step_interpolation`) + camel_case: Convert column names to camel case (e.g. `stepInterpolation` instead of `step_interpolation`) Returns: - pandas.DataFrame: The DataFrame representation of the datapoint. + The DataFrame representation of the datapoint. """ pd = local_import("pandas") @@ -827,11 +827,11 @@ def dump(self, camel_case: bool = True, convert_timestamps: bool = False) -> dic """Dump the DatapointsArray into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. 
Defaults to True. + convert_timestamps: Convert timestamps to ISO 8601 formatted strings. Default: False (returns as integer, milliseconds since epoch) Returns: - dict[str, Any]: A dictionary representing the instance. + A dictionary representing the instance. """ attrs, arrays = self._data_fields() if not convert_timestamps: # Eh.. so.. we still have to convert... @@ -894,14 +894,13 @@ def to_pandas( # type: ignore [override] """Convert the DatapointsArray into a pandas DataFrame. Args: - include_aggregate_name (bool): Include aggregate in the dataframe columns, if present (separate MultiIndex level) - include_granularity_name (bool): Include granularity in the dataframe columns, if present (separate MultiIndex level) - include_unit (bool): Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) - include_status (bool): Include status code and status symbol as separate columns, if available. Also adds the status info - as a separate level in the columns (MultiIndex). + include_aggregate_name: Include aggregate in the dataframe columns, if present (separate MultiIndex level) + include_granularity_name: Include granularity in the dataframe columns, if present (separate MultiIndex level) + include_unit: Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) + include_status: Include status code and status symbol as separate columns, if available. Also adds the status info as a separate level in the columns (MultiIndex). Returns: - pandas.DataFrame: The datapoints as a pandas DataFrame. + The datapoints as a pandas DataFrame. """ local_import("pandas") # throw nice import error early @@ -918,38 +917,38 @@ class Datapoints(CogniteResource): """An object representing a list of datapoints. 
Args: - id (int | None): Id of the time series the datapoints belong to - external_id (str | None): External id of the time series the datapoints belong to - instance_id (NodeId | None): The instance id of the time series the datapoints belong to - is_string (bool | None): Whether the time series contains numerical or string data. - is_step (bool | None): Whether the time series is stepwise or continuous. - unit (str | None): The physical unit of the time series (free-text field). Omitted if the datapoints were converted to another unit. - unit_external_id (str | None): The unit_external_id (as defined in the unit catalog) of the returned data points. If the datapoints were converted to a compatible unit, this will equal the converted unit, not the one defined on the time series. - granularity (str | None): The granularity of the aggregate datapoints (does not apply to raw data) - timestamp (list[int] | None): The data timestamps in milliseconds since the epoch (Jan 1, 1970). Can be negative to define a date before 1970. Minimum timestamp is 1900.01.01 00:00:00 UTC - value (list[str] | list[float] | None): The raw data values. Can be string or numeric. - average (list[float] | None): The time-weighted average values per aggregate interval. - max (list[float] | None): The maximum values per aggregate interval. - max_datapoint (list[MaxDatapoint] | list[MaxDatapointWithStatus] | None): Objects with the maximum values and their timestamps in the aggregate intervals, optionally including status codes and symbols. - min (list[float] | None): The minimum values per aggregate interval. - min_datapoint (list[MinDatapoint] | list[MinDatapointWithStatus] | None): Objects with the minimum values and their timestamps in the aggregate intervals, optionally including status codes and symbols. - count (list[int] | None): The number of raw datapoints per aggregate interval. - sum (list[float] | None): The sum of the raw datapoints per aggregate interval. 
- interpolation (list[float] | None): The interpolated values at the beginning of each the aggregate interval. - step_interpolation (list[float] | None): The interpolated values at the beginning of each the aggregate interval using stepwise interpretation. - continuous_variance (list[float] | None): The variance of the interpolated underlying function. - discrete_variance (list[float] | None): The variance of the datapoint values. - total_variation (list[float] | None): The total variation of the interpolated underlying function. - count_bad (list[int] | None): The number of raw datapoints with a bad status code, per aggregate interval. - count_good (list[int] | None): The number of raw datapoints with a good status code, per aggregate interval. - count_uncertain (list[int] | None): The number of raw datapoints with a uncertain status code, per aggregate interval. - duration_bad (list[int] | None): The duration the aggregate is defined and marked as bad (measured in milliseconds). - duration_good (list[int] | None): The duration the aggregate is defined and marked as good (measured in milliseconds). - duration_uncertain (list[int] | None): The duration the aggregate is defined and marked as uncertain (measured in milliseconds). - status_code (list[int] | None): The status codes for the raw datapoints. - status_symbol (list[str] | None): The status symbols for the raw datapoints. - error (list[None | str] | None): Human readable strings with description of what went wrong (returned by synthetic datapoints queries). - timezone (datetime.timezone | ZoneInfo | None): The timezone to use when displaying the datapoints. + id: Id of the time series the datapoints belong to + external_id: External id of the time series the datapoints belong to + instance_id: The instance id of the time series the datapoints belong to + is_string: Whether the time series contains numerical or string data. + is_step: Whether the time series is stepwise or continuous. 
+ unit: The physical unit of the time series (free-text field). Omitted if the datapoints were converted to another unit. + unit_external_id: The unit_external_id (as defined in the unit catalog) of the returned data points. If the datapoints were converted to a compatible unit, this will equal the converted unit, not the one defined on the time series. + granularity: The granularity of the aggregate datapoints (does not apply to raw data) + timestamp: The data timestamps in milliseconds since the epoch (Jan 1, 1970). Can be negative to define a date before 1970. Minimum timestamp is 1900.01.01 00:00:00 UTC + value: The raw data values. Can be string or numeric. + average: The time-weighted average values per aggregate interval. + max: The maximum values per aggregate interval. + max_datapoint: Objects with the maximum values and their timestamps in the aggregate intervals, optionally including status codes and symbols. + min: The minimum values per aggregate interval. + min_datapoint: Objects with the minimum values and their timestamps in the aggregate intervals, optionally including status codes and symbols. + count: The number of raw datapoints per aggregate interval. + sum: The sum of the raw datapoints per aggregate interval. + interpolation: The interpolated values at the beginning of each aggregate interval. + step_interpolation: The interpolated values at the beginning of each aggregate interval using stepwise interpretation. + continuous_variance: The variance of the interpolated underlying function. + discrete_variance: The variance of the datapoint values. + total_variation: The total variation of the interpolated underlying function. + count_bad: The number of raw datapoints with a bad status code, per aggregate interval. + count_good: The number of raw datapoints with a good status code, per aggregate interval. + count_uncertain: The number of raw datapoints with an uncertain status code, per aggregate interval. 
+ duration_bad: The duration the aggregate is defined and marked as bad (measured in milliseconds). + duration_good: The duration the aggregate is defined and marked as good (measured in milliseconds). + duration_uncertain: The duration the aggregate is defined and marked as uncertain (measured in milliseconds). + status_code: The status codes for the raw datapoints. + status_symbol: The status symbols for the raw datapoints. + error: Human readable strings with description of what went wrong (returned by synthetic datapoints queries). + timezone: The timezone to use when displaying the datapoints. """ def __init__( @@ -1064,10 +1063,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the datapoints into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representing the instance. + A dictionary representing the instance. """ dumped: dict[str, Any] = { "id": self.id, @@ -1108,15 +1107,14 @@ def to_pandas( # type: ignore [override] """Convert the datapoints into a pandas DataFrame. Args: - include_aggregate_name (bool): Include aggregate in the dataframe columns, if present (separate MultiIndex level) - include_granularity_name (bool): Include granularity in the dataframe columns, if present (separate MultiIndex level) - include_unit (bool): Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) - include_errors (bool): For synthetic datapoint queries, include a column with errors. - include_status (bool): Include status code and status symbol as separate columns, if available. Also adds the status info - as a separate level in the columns (MultiIndex). 
+ include_aggregate_name: Include aggregate in the dataframe columns, if present (separate MultiIndex level) + include_granularity_name: Include granularity in the dataframe columns, if present (separate MultiIndex level) + include_unit: Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) + include_errors: For synthetic datapoint queries, include a column with errors. + include_status: Include status code and status symbol as separate columns, if available. Also adds the status info as a separate level in the columns (MultiIndex). Returns: - pandas.DataFrame: The dataframe. + The dataframe. """ local_import("pandas") # throw nice import error early @@ -1278,12 +1276,12 @@ def get( # type: ignore [override] For duplicated time series, returns a list of DatapointsArray. Args: - id (int | None): The id of the item(s) to get. - external_id (str | None): The external_id of the item(s) to get. - instance_id (NodeId | tuple[str, str] | None): The instance_id of the item(s) to get. + id: The id of the item(s) to get. + external_id: The external_id of the item(s) to get. + instance_id: The instance_id of the item(s) to get. Returns: - DatapointsArray | list[DatapointsArray] | None: The requested item(s) + The requested item(s) """ # TODO: Question, can we type annotate without specifying the function? return super().get(id, external_id, instance_id) @@ -1301,14 +1299,13 @@ def to_pandas( # type: ignore [override] """Convert the DatapointsArrayList into a pandas DataFrame. Args: - include_aggregate_name (bool): Include aggregate in the dataframe columns, if present (separate MultiIndex level) - include_granularity_name (bool): Include granularity in the dataframe columns, if present (separate MultiIndex level) - include_unit (bool): Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) - include_status (bool): Include status code and status symbol as separate columns, if available. 
Also adds the status info - as a separate level in the columns (MultiIndex). + include_aggregate_name: Include aggregate in the dataframe columns, if present (separate MultiIndex level) + include_granularity_name: Include granularity in the dataframe columns, if present (separate MultiIndex level) + include_unit: Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) + include_status: Include status code and status symbol as separate columns, if available. Also adds the status info as a separate level in the columns (MultiIndex). Returns: - pandas.DataFrame: The datapoints as a pandas DataFrame. + The datapoints as a pandas DataFrame. """ return concat_dps_dataframe_list( self, @@ -1322,11 +1319,11 @@ def dump(self, camel_case: bool = True, convert_timestamps: bool = False) -> lis """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. - convert_timestamps (bool): Convert timestamps to ISO 8601 formatted strings. Default: False (returns as integer, milliseconds since epoch) + camel_case: Use camelCase for attribute names. Defaults to True. + convert_timestamps: Convert timestamps to ISO 8601 formatted strings. Default: False (returns as integer, milliseconds since epoch) Returns: - list[dict[str, Any]]: A list of dicts representing the instance. + A list of dicts representing the instance. """ return [dps.dump(camel_case, convert_timestamps) for dps in self] @@ -1358,12 +1355,12 @@ def get( # type: ignore [override] For duplicated time series, returns a list of Datapoints. Args: - id (int | None): The id of the item(s) to get. - external_id (str | None): The external_id of the item(s) to get. - instance_id (InstanceId | tuple[str, str] | None): The instance_id of the item(s) to get. + id: The id of the item(s) to get. + external_id: The external_id of the item(s) to get. + instance_id: The instance_id of the item(s) to get. 
Returns: - Datapoints | list[Datapoints] | None: The requested item(s) + The requested item(s) """ # TODO: Question, can we type annotate without specifying the function? return super().get(id, external_id, instance_id) @@ -1385,14 +1382,13 @@ def to_pandas( # type: ignore [override] """Convert the datapoints list into a pandas DataFrame. Args: - include_aggregate_name (bool): Include aggregate in the dataframe columns, if present (separate MultiIndex level) - include_granularity_name (bool): Include granularity in the dataframe columns, if present (separate MultiIndex level) - include_unit (bool): Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) - include_status (bool): Include status code and status symbol as separate columns, if available. Also adds the status info - as a separate level in the columns (MultiIndex). + include_aggregate_name: Include aggregate in the dataframe columns, if present (separate MultiIndex level) + include_granularity_name: Include granularity in the dataframe columns, if present (separate MultiIndex level) + include_unit: Include the unit_external_id in the dataframe columns, if present (separate MultiIndex level) + include_status: Include status code and status symbol as separate columns, if available. Also adds the status info as a separate level in the columns (MultiIndex). Returns: - pandas.DataFrame: The datapoints list as a pandas DataFrame. + The datapoints list as a pandas DataFrame. """ return concat_dps_dataframe_list( self, diff --git a/cognite/client/data_classes/datapoints_subscriptions.py b/cognite/client/data_classes/datapoints_subscriptions.py index 3acf4b4f71..5242d50503 100644 --- a/cognite/client/data_classes/datapoints_subscriptions.py +++ b/cognite/client/data_classes/datapoints_subscriptions.py @@ -59,15 +59,15 @@ class DatapointSubscription(DatapointSubscriptionCore): This is the read version of a subscription, used when reading subscriptions from CDF. 
Args: - external_id (ExternalId): Externally provided ID for the subscription. Must be unique. - partition_count (int): The maximum effective parallelism of this subscription (the number of clients that can read from it concurrently) will be limited to this number, but a higher partition count will cause a higher time overhead. - created_time (int): Time when the subscription was created in CDF in milliseconds since Jan 1, 1970. - last_updated_time (int): Time when the subscription was last updated in CDF in milliseconds since Jan 1, 1970. - time_series_count (int | None): The number of time series in the subscription. None if no timeseries. - filter (Filter | None): If present, the subscription is defined by this filter. - name (str | None): No description. - description (str | None): A summary explanation for the subscription. - data_set_id (int | None): The id of the dataset this subscription belongs to. + external_id: Externally provided ID for the subscription. Must be unique. + partition_count: The maximum effective parallelism of this subscription (the number of clients that can read from it concurrently) will be limited to this number, but a higher partition count will cause a higher time overhead. + created_time: Time when the subscription was created in CDF in milliseconds since Jan 1, 1970. + last_updated_time: Time when the subscription was last updated in CDF in milliseconds since Jan 1, 1970. + time_series_count: The number of time series in the subscription. None if no timeseries. + filter: If present, the subscription is defined by this filter. + name: No description. + description: A summary explanation for the subscription. + data_set_id: The id of the dataset this subscription belongs to. """ def __init__( @@ -120,14 +120,14 @@ class DataPointSubscriptionWrite(DatapointSubscriptionCore): A subscription can either be defined directly by a list of time series ids or indirectly by a filter. 
Args: - external_id (str): Externally provided ID for the subscription. Must be unique. - partition_count (int): The maximum effective parallelism of this subscription (the number of clients that can read from it concurrently) will be limited to this number, but a higher partition count will cause a higher time overhead. The partition count must be between 1 and 100. CAVEAT: This cannot change after the subscription has been created. - time_series_ids (list[ExternalId] | None): List of (external) ids of time series that this subscription will listen to. Not compatible with filter. - instance_ids(list[NodeId] | None): List of instance ids of time series that this subscription will listen to. Not compatible with filter. - filter (Filter | None): A filter DSL (Domain Specific Language) to define advanced filter queries. Not compatible with time_series_ids. - name (str | None): No description. - description (str | None): A summary explanation for the subscription. - data_set_id (int | None): The id of the dataset this subscription belongs to. + external_id: Externally provided ID for the subscription. Must be unique. + partition_count: The maximum effective parallelism of this subscription (the number of clients that can read from it concurrently) will be limited to this number, but a higher partition count will cause a higher time overhead. The partition count must be between 1 and 100. CAVEAT: This cannot change after the subscription has been created. + time_series_ids: List of (external) ids of time series that this subscription will listen to. Not compatible with filter. + instance_ids: List of instance ids of time series that this subscription will listen to. Not compatible with filter. + filter: A filter DSL (Domain Specific Language) to define advanced filter queries. Not compatible with time_series_ids. + name: No description. + description: A summary explanation for the subscription. + data_set_id: The id of the dataset this subscription belongs to. 
""" def __init__( @@ -176,7 +176,7 @@ class DataPointSubscriptionUpdate(CogniteUpdate): """Changes applied to datapoint subscription Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. + external_id: The external ID provided by the client. Must be unique for the resource type. """ def __init__(self, external_id: str) -> None: @@ -255,10 +255,9 @@ class TimeSeriesID(CogniteResource): A TimeSeries Identifier to uniquely identify a time series. Args: - id (int | None): A server-generated ID for the object. May be None if the time series - reference is broken (e.g., the time series was deleted or its external_id was changed). - external_id (ExternalId | None): The external ID provided by the client. Must be unique for the resource type. - instance_id (NodeId | None): The ID of an instance in Cognite Data Models. + id: A server-generated ID for the object. May be None if the time series reference is broken (e.g., the time series was deleted or its external_id was changed). + external_id: The external ID provided by the client. Must be unique for the resource type. + instance_id: The ID of an instance in Cognite Data Models. """ def __init__( @@ -401,10 +400,10 @@ class _DatapointSubscriptionBatchWithPartitions: """A batch of data from a subscription. Args: - updates (list[DatapointsUpdate]): List of updates from the subscription, sorted by point in time they were applied to the time series. Every update contains a time series along with a set of changes to that time series. - partitions (list[DatapointSubscriptionPartition]): Which partitions/cursors to use for the next request. Map from partition index to cursor. - has_next (bool): Whether there is more data available at the time of the query. In rare cases, we may return true, even if there is no data available. If that is the case, just continue to query with the updated cursors, and it will eventually return false. 
- subscription_changes (SubscriptionTimeSeriesUpdate): If present, this object represents changes to the subscription definition. The subscription will now start/stop listening to changes from the time series listed here. + updates: List of updates from the subscription, sorted by point in time they were applied to the time series. Every update contains a time series along with a set of changes to that time series. + partitions: Which partitions/cursors to use for the next request. Map from partition index to cursor. + has_next: Whether there is more data available at the time of the query. In rare cases, we may return true, even if there is no data available. If that is the case, just continue to query with the updated cursors, and it will eventually return false. + subscription_changes: If present, this object represents changes to the subscription definition. The subscription will now start/stop listening to changes from the time series listed here. """ updates: list[DatapointsUpdate] diff --git a/cognite/client/data_classes/documents.py b/cognite/client/data_classes/documents.py index 1f8306a7b6..36bb36a003 100644 --- a/cognite/client/data_classes/documents.py +++ b/cognite/client/data_classes/documents.py @@ -24,9 +24,9 @@ class DocumentsGeoJsonGeometry(CogniteResource): """Represents the points, curves and surfaces in the coordinate space. Args: - type (Literal['Point', 'MultiPoint', 'LineString', 'MultiLineString', 'Polygon', 'MultiPolygon', 'GeometryCollection']): The geometry type. - coordinates (list | None): An array of the coordinates of the geometry. The structure of the elements in this array is determined by the type of geometry. - geometries (Collection[Geometry] | None): No description. + type: The geometry type. + coordinates: An array of the coordinates of the geometry. The structure of the elements in this array is determined by the type of geometry. + geometries: No description. 
Examples: Point: @@ -112,19 +112,19 @@ class SourceFile(CogniteResource): The source file that a document is derived from. Args: - name (str): The name of the source file. - hash (str | None): The hash of the source file. This is a SHA256 hash of the original file. The hash only covers the file content, and not other CDF metadata. - directory (str | None): The directory the file can be found in. - source (str | None): The source of the file. - mime_type (str | None): The mime type of the file. - size (int | None): The size of the file in bytes. - asset_ids (list[int] | None): The ids of the assets related to this file. - labels (list[Label | str | LabelDefinition] | None): A list of labels associated with this document's source file in CDF. - geo_location (DocumentsGeoJsonGeometry | None): The geolocation of the source file. - dataset_id (int | None): The id if the dataset this file belongs to, if any. - security_categories (list[int] | None): The security category IDs required to access this file. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. - **_ (Any): No description. + name: The name of the source file. + hash: The hash of the source file. This is a SHA256 hash of the original file. The hash only covers the file content, and not other CDF metadata. + directory: The directory the file can be found in. + source: The source of the file. + mime_type: The mime type of the file. + size: The size of the file in bytes. + asset_ids: The ids of the assets related to this file. + labels: A list of labels associated with this document's source file in CDF. + geo_location: The geolocation of the source file. + dataset_id: The id of the dataset this file belongs to, if any. + security_categories: The security category IDs required to access this file. + metadata: Custom, application specific metadata. String key -> String value. + **_: No description. 
""" def __init__( @@ -191,26 +191,26 @@ class Document(CogniteResource): A representation of a document in CDF. Args: - id (int): A server-generated ID for the object. - created_time (int): The creation time of the document in CDF in milliseconds since Jan 1, 1970. - source_file (SourceFile): The source file that this document is derived from. - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - instance_id (InstanceId | None): The instance ID of the node this document is associated with. - title (str | None): The title of the document. - author (str | None): The author of the document. - producer (str | None): The producer of the document. Many document types contain metadata indicating what software or system was used to create the document. - modified_time (int | None): The last time the document was modified in CDF in milliseconds since Jan 1, 1970. - last_indexed_time (int | None): The last time the document was indexed in the search engine, measured in milliseconds since Jan 1, 1970. - mime_type (str | None): The detected mime type of the document. - extension (str | None): Extension of the file (always in lowercase) - page_count (int | None): The number of pages in the document. - type (str | None): The detected type of the document. - language (str | None): The detected language of the document. - truncated_content (str | None): The truncated content of the document. - asset_ids (list[int] | None): The ids of any assets referred to in the document. - labels (list[Label | str | LabelDefinition] | None): The labels attached to the document. - geo_location (DocumentsGeoJsonGeometry | None): The geolocation of the document. - **_ (Any): No description. + id: A server-generated ID for the object. + created_time: The creation time of the document in CDF in milliseconds since Jan 1, 1970. + source_file: The source file that this document is derived from. + external_id: The external ID provided by the client. 
Must be unique for the resource type. + instance_id: The instance ID of the node this document is associated with. + title: The title of the document. + author: The author of the document. + producer: The producer of the document. Many document types contain metadata indicating what software or system was used to create the document. + modified_time: The last time the document was modified in CDF in milliseconds since Jan 1, 1970. + last_indexed_time: The last time the document was indexed in the search engine, measured in milliseconds since Jan 1, 1970. + mime_type: The detected mime type of the document. + extension: Extension of the file (always in lowercase) + page_count: The number of pages in the document. + type: The detected type of the document. + language: The detected language of the document. + truncated_content: The truncated content of the document. + asset_ids: The ids of any assets referred to in the document. + labels: The labels attached to the document. + geo_location: The geolocation of the document. + **_: No description. """ def __init__( @@ -305,8 +305,8 @@ class Highlight(CogniteResource): This is used in search results to represent the result. Args: - name (list[str]): Matches in name. - content (list[str]): Matches in content. + name: Matches in name. + content: Matches in content. """ name: list[str] @@ -331,8 +331,8 @@ class DocumentHighlight(CogniteResource): This is used in search results to represent the result Args: - highlight (Highlight): The highlight from the document matching search results. - document (Document): The document. + highlight: The highlight from the document matching search results. + document: The document. 
""" highlight: Highlight diff --git a/cognite/client/data_classes/events.py b/cognite/client/data_classes/events.py index 7899c45563..ac822bf647 100644 --- a/cognite/client/data_classes/events.py +++ b/cognite/client/data_classes/events.py @@ -32,9 +32,9 @@ class EndTimeFilter(CogniteResource): """Either range between two timestamps or isNull filter condition. Args: - max (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - min (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - is_null (bool | None): Set to true if you want to search for data with field value not set, false to search for cases where some value is present. + max: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + min: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + is_null: Set to true if you want to search for data with field value not set, false to search for cases where some value is present. """ def __init__(self, max: int | None = None, min: int | None = None, is_null: bool | None = None) -> None: @@ -59,19 +59,19 @@ class Event(WriteableCogniteResourceWithClientRef["EventWrite"]): This is the read version of the Event class. It is used when retrieving existing events. Args: - id (int): A server-generated ID for the object. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. 
- data_set_id (int | None): The id of the dataset this event belongs to. - start_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - end_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - type (str | None): Type of the event, e.g. 'failure'. - subtype (str | None): SubType of the event, e.g. 'electrical'. - description (str | None): Textual description of the event. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 128000 bytes, up to 256 key-value pairs, of total size at most 200000. - asset_ids (Sequence[int] | None): Asset IDs of equipment that this event relates to. - source (str | None): The source of this event. + id: A server-generated ID for the object. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + external_id: The external ID provided by the client. Must be unique for the resource type. + data_set_id: The id of the dataset this event belongs to. + start_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + end_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + type: Type of the event, e.g. 'failure'. + subtype: SubType of the event, e.g. 'electrical'. + description: Textual description of the event. + metadata: Custom, application-specific metadata. String key -> String value. 
Limits: Maximum length of key is 128 bytes, value 128000 bytes, up to 256 key-value pairs, of total size at most 200000. + asset_ids: Asset IDs of equipment that this event relates to. + source: The source of this event. """ def __init__( @@ -143,16 +143,16 @@ class EventWrite(WriteableCogniteResource["EventWrite"]): This is the write version of the Event class. It is used when creating new events. Args: - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - data_set_id (int | None): The id of the dataset this event belongs to. - start_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - end_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - type (str | None): Type of the event, e.g. 'failure'. - subtype (str | None): SubType of the event, e.g. 'electrical'. - description (str | None): Textual description of the event. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 128000 bytes, up to 256 key-value pairs, of total size at most 200000. - asset_ids (Sequence[int] | None): Asset IDs of equipment that this event relates to. - source (str | None): The source of this event. + external_id: The external ID provided by the client. Must be unique for the resource type. + data_set_id: The id of the dataset this event belongs to. + start_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + end_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + type: Type of the event, e.g. 'failure'. + subtype: SubType of the event, e.g. 'electrical'. + description: Textual description of the event. 
+ metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 128000 bytes, up to 256 key-value pairs, of total size at most 200000. + asset_ids: Asset IDs of equipment that this event relates to. + source: The source of this event. """ def __init__( @@ -202,20 +202,20 @@ class EventFilter(CogniteFilter): """Filter on events filter with exact match Args: - start_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - end_time (dict[str, Any] | EndTimeFilter | None): Either range between two timestamps or isNull filter condition. - active_at_time (dict[str, Any] | TimestampRange | None): Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 128000 bytes, up to 256 key-value pairs, of total size at most 200000. - asset_ids (Sequence[int] | None): Asset IDs of equipment that this event relates to. - asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of equipment that this event relates to. - asset_subtree_ids (Sequence[dict[str, Any]] | None): Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (Sequence[dict[str, Any]] | None): Only include events that belong to these datasets. - source (str | None): The source of this event. - type (str | None): Type of the event, e.g 'failure'. - subtype (str | None): SubType of the event, e.g 'electrical'. 
- created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. + start_time: Range between two timestamps. + end_time: Either range between two timestamps or isNull filter condition. + active_at_time: Event is considered active from its startTime to endTime inclusive. If startTime is null, event is never active. If endTime is null, event is active from startTime onwards. activeAtTime filter will match all events that are active at some point from min to max, from min, or to max, depending on which of min and max parameters are specified. + metadata: Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 128000 bytes, up to 256 key-value pairs, of total size at most 200000. + asset_ids: Asset IDs of equipment that this event relates to. + asset_external_ids: Asset External IDs of equipment that this event relates to. + asset_subtree_ids: Only include events that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: Only include events that belong to these datasets. + source: The source of this event. + type: Type of the event, e.g 'failure'. + subtype: SubType of the event, e.g 'electrical'. + created_time: Range between two timestamps. + last_updated_time: Range between two timestamps. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. """ def __init__( @@ -269,8 +269,8 @@ class EventUpdate(CogniteUpdate): """Changes will be applied to event. Args: - id (int): A server-generated ID for the object. - external_id (str): The external ID provided by the client. Must be unique for the resource type. + id: A server-generated ID for the object. 
+ external_id: The external ID provided by the client. Must be unique for the resource type. """ class _PrimitiveEventUpdate(CognitePrimitiveUpdate): diff --git a/cognite/client/data_classes/extractionpipelines.py b/cognite/client/data_classes/extractionpipelines.py index 7ed12a2543..8e21e31659 100644 --- a/cognite/client/data_classes/extractionpipelines.py +++ b/cognite/client/data_classes/extractionpipelines.py @@ -28,10 +28,10 @@ class ExtractionPipelineContact(CogniteResource): """A contact for an extraction pipeline Args: - name (str | None): Name of contact - email (str | None): Email address of contact - role (str | None): Role of contact, such as Owner, Maintainer, etc. - send_notification (bool | None): Whether to send notifications to this contact or not + name: Name of contact + email: Email address of contact + role: Role of contact, such as Owner, Maintainer, etc. + send_notification: Whether to send notifications to this contact or not """ def __init__( @@ -61,7 +61,7 @@ class ExtractionPipelineNotificationConfiguration(CogniteResource): """Extraction pipeline notification configuration Args: - allowed_not_seen_range_in_minutes (int | None): Time in minutes to pass without any Run. Null if extraction pipeline is not checked. + allowed_not_seen_range_in_minutes: Time in minutes to pass without any Run. Null if extraction pipeline is not checked. """ @@ -78,18 +78,18 @@ class ExtractionPipelineCore(WriteableCogniteResource["ExtractionPipelineWrite"] """An extraction pipeline is a representation of a process writing data to CDF, such as an extractor or an ETL tool. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str | None): The name of the extraction pipeline. - description (str | None): The description of the extraction pipeline. - data_set_id (int | None): The id of the dataset this extraction pipeline related with. 
- raw_tables (list[dict[str, str]] | None): list of raw tables in list format: [{"dbName": "value", "tableName" : "value"}]. - schedule (str | None): One of None/On trigger/Continuous/cron regex. - contacts (list[ExtractionPipelineContact] | None): list of contacts - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. - source (str | None): Source text value for extraction pipeline. - documentation (str | None): Documentation text value for extraction pipeline. - notification_config (ExtractionPipelineNotificationConfiguration | None): Notification configuration for the extraction pipeline. - created_by (str | None): Extraction pipeline creator, usually an email. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the extraction pipeline. + description: The description of the extraction pipeline. + data_set_id: The id of the dataset this extraction pipeline is related to. + raw_tables: list of raw tables in list format: [{"dbName": "value", "tableName" : "value"}]. + schedule: One of None/On trigger/Continuous/cron regex. + contacts: list of contacts + metadata: Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. + source: Source text value for extraction pipeline. + documentation: Documentation text value for extraction pipeline. + notification_config: Notification configuration for the extraction pipeline. + created_by: Extraction pipeline creator, usually an email. """ @@ -135,25 +135,25 @@ class ExtractionPipeline(ExtractionPipelineCore): This is the read version of the ExtractionPipeline class, which is used when retrieving extraction pipelines. Args: - id (int): A server-generated ID for the object. 
- external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str): The name of the extraction pipeline. - description (str | None): The description of the extraction pipeline. - data_set_id (int): The id of the dataset this extraction pipeline related with. - raw_tables (list[dict[str, str]] | None): list of raw tables in list format: [{"dbName": "value", "tableName" : "value"}]. - last_success (int | None): Milliseconds value of last success status. - last_failure (int | None): Milliseconds value of last failure status. - last_message (str | None): Message of last failure. - last_seen (int | None): Milliseconds value of last seen status. - schedule (str | None): One of None/On trigger/Continuous/cron regex. - contacts (list[ExtractionPipelineContact] | None): list of contacts - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. - source (str | None): Source text value for extraction pipeline. - documentation (str | None): Documentation text value for extraction pipeline. - notification_config (ExtractionPipelineNotificationConfiguration | None): Notification configuration for the extraction pipeline. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - created_by (str | None): Extraction pipeline creator, usually an email. + id: A server-generated ID for the object. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the extraction pipeline. + description: The description of the extraction pipeline. 
+ data_set_id: The id of the dataset this extraction pipeline related with. + raw_tables: list of raw tables in list format: [{"dbName": "value", "tableName" : "value"}]. + last_success: Milliseconds value of last success status. + last_failure: Milliseconds value of last failure status. + last_message: Message of last failure. + last_seen: Milliseconds value of last seen status. + schedule: One of None/On trigger/Continuous/cron regex. + contacts: list of contacts + metadata: Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. + source: Source text value for extraction pipeline. + documentation: Documentation text value for extraction pipeline. + notification_config: Notification configuration for the extraction pipeline. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + created_by: Extraction pipeline creator, usually an email. """ def __init__( @@ -258,18 +258,18 @@ class ExtractionPipelineWrite(ExtractionPipelineCore): This is the write version of the ExtractionPipeline class, which is used when creating extraction pipelines. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str): The name of the extraction pipeline. - data_set_id (int): The id of the dataset this extraction pipeline related with. - description (str | None): The description of the extraction pipeline. - raw_tables (list[dict[str, str]] | None): list of raw tables in list format: [{"dbName": "value", "tableName" : "value"}]. - schedule (str | None): One of None/On trigger/Continuous/cron regex. 
- contacts (list[ExtractionPipelineContact] | None): list of contacts - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. - source (str | None): Source text value for extraction pipeline. - documentation (str | None): Documentation text value for extraction pipeline. - notification_config (ExtractionPipelineNotificationConfiguration | None): Notification configuration for the extraction pipeline. - created_by (str | None): Extraction pipeline creator, usually an email. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the extraction pipeline. + data_set_id: The id of the dataset this extraction pipeline related with. + description: The description of the extraction pipeline. + raw_tables: list of raw tables in list format: [{"dbName": "value", "tableName" : "value"}]. + schedule: One of None/On trigger/Continuous/cron regex. + contacts: list of contacts + metadata: Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 128 bytes, value 10240 bytes, up to 256 key-value pairs, of total size at most 10240. + source: Source text value for extraction pipeline. + documentation: Documentation text value for extraction pipeline. + notification_config: Notification configuration for the extraction pipeline. + created_by: Extraction pipeline creator, usually an email. """ def __init__( @@ -330,8 +330,8 @@ class ExtractionPipelineUpdate(CogniteUpdate): """Changes applied to an extraction pipeline Args: - id (int): A server-generated ID for the object. - external_id (str): The external ID provided by the client. Must be unique for the resource type. + id: A server-generated ID for the object. + external_id: The external ID provided by the client. Must be unique for the resource type. 
""" class _PrimitiveExtractionPipelineUpdate(CognitePrimitiveUpdate): @@ -436,9 +436,9 @@ class ExtractionPipelineRunCore(WriteableCogniteResource["ExtractionPipelineRunW """A representation of an extraction pipeline run. Args: - status (str): success/failure/seen. - message (str | None): Optional status message. - created_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + status: success/failure/seen. + message: Optional status message. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. """ def __init__( @@ -456,11 +456,11 @@ class ExtractionPipelineRun(ExtractionPipelineRunCore): """A representation of an extraction pipeline run. Args: - id (int): A server-generated ID for the object. - extpipe_external_id (str | None): The external ID of the extraction pipeline. - status (str): success/failure/seen. - message (str | None): Optional status message. - created_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + id: A server-generated ID for the object. + extpipe_external_id: The external ID of the extraction pipeline. + status: success/failure/seen. + message: Optional status message. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. """ def __init__( @@ -523,10 +523,10 @@ class ExtractionPipelineRunWrite(ExtractionPipelineRunCore): This is the write version of the ExtractionPipelineRun class, which is used when creating extraction pipeline runs. Args: - extpipe_external_id (str): The external ID of the extraction pipeline. - status (Literal['success', 'failure', 'seen']): success/failure/seen. - message (str | None): Optional status message. 
- created_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + extpipe_external_id: The external ID of the extraction pipeline. + status: success/failure/seen. + message: Optional status message. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. """ def __init__( @@ -588,7 +588,7 @@ class StringFilter(CogniteFilter): """Filter runs on substrings of the message Args: - substring (str | None): Part of message + substring: Part of message """ def __init__(self, substring: str | None = None) -> None: @@ -599,10 +599,10 @@ class ExtractionPipelineRunFilter(CogniteFilter): """Filter runs with exact matching Args: - external_id (str | None): The external ID of related ExtractionPipeline provided by the client. Must be unique for the resource type. - statuses (SequenceNotStr[str] | None): success/failure/seen. - message (StringFilter | None): message filter. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. + external_id: The external ID of related ExtractionPipeline provided by the client. Must be unique for the resource type. + statuses: success/failure/seen. + message: message filter. + created_time: Range between two timestamps. """ def __init__( @@ -622,10 +622,10 @@ class ExtractionPipelineConfigRevision(CogniteResource): """An extraction pipeline config revision Args: - external_id (str): The external ID of the associated extraction pipeline. - revision (int): The revision number of this config as a positive integer. - description (str | None): Short description of this configuration revision. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + external_id: The external ID of the associated extraction pipeline. 
+ revision: The revision number of this config as a positive integer. + description: Short description of this configuration revision. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. """ def __init__( @@ -654,9 +654,9 @@ class ExtractionPipelineConfigCore(WriteableCogniteResource["ExtractionPipelineC """An extraction pipeline config Args: - external_id (str | None): The external ID of the associated extraction pipeline. - config (str | None): Contents of this configuration revision. - description (str | None): Short description of this configuration revision. + external_id: The external ID of the associated extraction pipeline. + config: Contents of this configuration revision. + description: Short description of this configuration revision. """ def __init__( @@ -674,11 +674,11 @@ class ExtractionPipelineConfig(ExtractionPipelineConfigCore): """An extraction pipeline config Args: - external_id (str): The external ID of the associated extraction pipeline. - config (str | None): Contents of this configuration revision. - revision (int): The revision number of this config as a positive integer. - description (str | None): Short description of this configuration revision. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + external_id: The external ID of the associated extraction pipeline. + config: Contents of this configuration revision. + revision: The revision number of this config as a positive integer. + description: Short description of this configuration revision. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
""" def __init__( @@ -722,9 +722,9 @@ class ExtractionPipelineConfigWrite(ExtractionPipelineConfigCore): """An extraction pipeline config Args: - external_id (str): The external ID of the associated extraction pipeline. - config (str | None): Contents of this configuration revision. - description (str | None): Short description of this configuration revision. + external_id: The external ID of the associated extraction pipeline. + config: Contents of this configuration revision. + description: Short description of this configuration revision. """ def __init__( diff --git a/cognite/client/data_classes/files.py b/cognite/client/data_classes/files.py index 34fbe5650d..778ee4aa73 100644 --- a/cognite/client/data_classes/files.py +++ b/cognite/client/data_classes/files.py @@ -39,20 +39,20 @@ class FileMetadataCore(WriteableCogniteResource["FileMetadataWrite"], ABC): """No description. Args: - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - instance_id (NodeId | None): The instance ID for the file. (Only applicable for files created in DMS) - name (str): Name of the file. - source (str | None): The source of the file. - mime_type (str | None): File type. E.g., text/plain, application/pdf, ... - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. - directory (str | None): Directory associated with the file. It must be an absolute, unix-style path. - asset_ids (Sequence[int] | None): No description. - data_set_id (int | None): The dataSet ID for the item. - labels (Sequence[Label] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the file. - source_created_time (int | None): The timestamp for when the file was originally created in the source system. 
- source_modified_time (int | None): The timestamp for when the file was last modified in the source system. - security_categories (Sequence[int] | None): The security category IDs required to access this file. + external_id: The external ID provided by the client. Must be unique for the resource type. + instance_id: The instance ID for the file. (Only applicable for files created in DMS) + name: Name of the file. + source: The source of the file. + mime_type: File type. E.g., text/plain, application/pdf, ... + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + directory: Directory associated with the file. It must be an absolute, unix-style path. + asset_ids: No description. + data_set_id: The dataSet ID for the item. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the file. + source_created_time: The timestamp for when the file was originally created in the source system. + source_modified_time: The timestamp for when the file was last modified in the source system. + security_categories: The security category IDs required to access this file. """ def __init__( @@ -113,25 +113,25 @@ class FileMetadata(FileMetadataCore): This is the read version of FileMetadata, and it is used when retrieving from CDF. Args: - id (int): A server-generated ID for the object. - uploaded (bool): Whether the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- uploaded_time (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - instance_id (NodeId | None): The Instance ID for the file. (Only applicable for files created in DMS) - name (str): Name of the file. - source (str | None): The source of the file. - mime_type (str | None): File type. E.g., text/plain, application/pdf, ... - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. - directory (str | None): Directory associated with the file. It must be an absolute, unix-style path. - asset_ids (Sequence[int] | None): No description. - data_set_id (int | None): The dataSet ID for the item. - labels (Sequence[Label] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the file. - source_created_time (int | None): The timestamp for when the file was originally created in the source system. - source_modified_time (int | None): The timestamp for when the file was last modified in the source system. - security_categories (Sequence[int] | None): The security category IDs required to access this file. + id: A server-generated ID for the object. + uploaded: Whether the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + uploaded_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
+ external_id: The external ID provided by the client. Must be unique for the resource type. + instance_id: The Instance ID for the file. (Only applicable for files created in DMS) + name: Name of the file. + source: The source of the file. + mime_type: File type. E.g., text/plain, application/pdf, ... + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + directory: Directory associated with the file. It must be an absolute, unix-style path. + asset_ids: No description. + data_set_id: The dataSet ID for the item. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the file. + source_created_time: The timestamp for when the file was originally created in the source system. + source_modified_time: The timestamp for when the file was last modified in the source system. + security_categories: The security category IDs required to access this file. """ def __init__( @@ -231,20 +231,20 @@ class FileMetadataWrite(FileMetadataCore): This is the write version of FileMetadata, and it is used when inserting or updating files. Args: - name (str): Name of the file. - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - instance_id (NodeId | None): The Instance ID for the file. (Only applicable for files created in DMS) - source (str | None): The source of the file. - mime_type (str | None): File type. E.g., text/plain, application/pdf, ... - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. - directory (str | None): Directory associated with the file. It must be an absolute, unix-style path. - asset_ids (Sequence[int] | None): No description. - data_set_id (int | None): The dataSet ID for the item. 
- labels (Sequence[Label] | None): A list of the labels associated with this resource item. - geo_location (GeoLocation | None): The geographic metadata of the file. - source_created_time (int | None): The timestamp for when the file was originally created in the source system. - source_modified_time (int | None): The timestamp for when the file was last modified in the source system. - security_categories (Sequence[int] | None): The security category IDs required to access this file. + name: Name of the file. + external_id: The external ID provided by the client. Must be unique for the resource type. + instance_id: The Instance ID for the file. (Only applicable for files created in DMS) + source: The source of the file. + mime_type: File type. E.g., text/plain, application/pdf, ... + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + directory: Directory associated with the file. It must be an absolute, unix-style path. + asset_ids: No description. + data_set_id: The dataSet ID for the item. + labels: A list of the labels associated with this resource item. + geo_location: The geographic metadata of the file. + source_created_time: The timestamp for when the file was originally created in the source system. + source_modified_time: The timestamp for when the file was last modified in the source system. + security_categories: The security category IDs required to access this file. """ def __init__( @@ -309,24 +309,24 @@ class FileMetadataFilter(CogniteFilter): """No description. Args: - name (str | None): Name of the file. - mime_type (str | None): File type. E.g. text/plain, application/pdf, .. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. 
- asset_ids (Sequence[int] | None): Only include files that reference these specific asset IDs. - asset_external_ids (SequenceNotStr[str] | None): Only include files that reference these specific asset external IDs. - data_set_ids (Sequence[dict[str, Any]] | None): Only include files that belong to these datasets. - labels (LabelFilter | None): Return only the files matching the specified label(s). - geo_location (GeoLocationFilter | None): Only include files matching the specified geographic relation. - asset_subtree_ids (Sequence[dict[str, Any]] | None): Only include files that have a related asset in a subtree rooted at any of these asset IDs or external IDs. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - source (str | None): The source of this event. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - uploaded_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - source_created_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceCreatedTime field has been set and is within the specified range. - source_modified_time (dict[str, Any] | TimestampRange | None): Filter for files where the sourceModifiedTime field has been set and is within the specified range. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - directory_prefix (str | None): Filter by this (case-sensitive) prefix for the directory provided by the client. - uploaded (bool | None): Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. + name: Name of the file. + mime_type: File type. E.g. text/plain, application/pdf, .. + metadata: Custom, application specific metadata. String key -> String value. 
Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + asset_ids: Only include files that reference these specific asset IDs. + asset_external_ids: Only include files that reference these specific asset external IDs. + data_set_ids: Only include files that belong to these datasets. + labels: Return only the files matching the specified label(s). + geo_location: Only include files matching the specified geographic relation. + asset_subtree_ids: Only include files that have a related asset in a subtree rooted at any of these asset IDs or external IDs. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + source: The source of this event. + created_time: Range between two timestamps. + last_updated_time: Range between two timestamps. + uploaded_time: Range between two timestamps. + source_created_time: Filter for files where the sourceCreatedTime field has been set and is within the specified range. + source_modified_time: Filter for files where the sourceModifiedTime field has been set and is within the specified range. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + directory_prefix: Filter by this (case-sensitive) prefix for the directory provided by the client. + uploaded: Whether or not the actual file is uploaded. This field is returned only by the API, it has no effect in a post body. """ def __init__( @@ -395,9 +395,9 @@ class FileMetadataUpdate(CogniteUpdate): """Changes will be applied to file. Args: - id (int | None): A server-generated ID for the object. - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - instance_id (NodeId | None): The ID of the file. + id: A server-generated ID for the object. + external_id: The external ID provided by the client. Must be unique for the resource type. + instance_id: The ID of the file. 
""" def __init__( @@ -551,10 +551,10 @@ class FileMultipartUploadSession( Can be used both as a regular and async context manager. Use Args: - file_metadata (FileMetadata): The created file in CDF. - upload_urls (list[str]): List of upload URLs for the file upload. - upload_id (str): ID of the multipart upload, needed to complete the upload. - cognite_client (AsyncCogniteClient): Cognite client to use for completing the upload. + file_metadata: The created file in CDF. + upload_urls: List of upload URLs for the file upload. + upload_id: ID of the multipart upload, needed to complete the upload. + cognite_client: Cognite client to use for completing the upload. """ def __init__( @@ -574,8 +574,8 @@ async def upload_part_async(self, part_no: int, content: str | bytes | BinaryIO) If `content` does not somehow expose its length, this method may not work on Azure or AWS. Args: - part_no (int): Which part number this is, must be between 0 and `parts` given to `multipart_upload_session` - content (str | bytes | BinaryIO): The content to upload. + part_no: Which part number this is, must be between 0 and `parts` given to `multipart_upload_session` + content: The content to upload. """ if part_no < 0 or part_no > len(self._uploaded_urls): raise IndexError(f"Index out of range: {part_no}, must be between 0 and {len(self._uploaded_urls)}") diff --git a/cognite/client/data_classes/filters.py b/cognite/client/data_classes/filters.py index 1090e3567f..de2e048be1 100644 --- a/cognite/client/data_classes/filters.py +++ b/cognite/client/data_classes/filters.py @@ -107,12 +107,10 @@ def dump(self, camel_case_property: bool = False) -> dict[str, Any]: Dump the filter to a dictionary. Args: - camel_case_property (bool): Whether to camel case the property names. Defaults to False. Typically, - when the filter is used in data modeling, the property names should not be changed, - while when used with Assets, Events, Sequences, or Files, the property names should be camel cased. 
+ camel_case_property: Whether to camel case the property names. Defaults to False. Typically, when the filter is used in data modeling, the property names should not be changed, while when used with Assets, Events, Sequences, or Files, the property names should be camel cased. Returns: - dict[str, Any]: The filter as a dictionary. + The filter as a dictionary. """ return {self._filter_name: self._filter_body(camel_case_property=camel_case_property)} @@ -303,7 +301,7 @@ class And(CompoundFilter): """A filter that combines multiple filters with a logical AND. Args: - *filters (Filter): The filters to combine. + *filters: The filters to combine. Example: A filter that combines an Equals and an In filter: @@ -339,7 +337,7 @@ class Or(CompoundFilter): """A filter that combines multiple filters with a logical OR. Args: - *filters (Filter): The filters to combine. + *filters: The filters to combine. Example: A filter that combines an Equals and an In filter: @@ -373,7 +371,7 @@ class Not(CompoundFilter): """A filter that negates another filter. Args: - filter (Filter): The filter to negate. + filter: The filter to negate. Example: A filter that negates an Equals filter: @@ -412,8 +410,8 @@ class Nested(Filter): """A filter to apply to the node at the other side of a direct relation. Args: - scope (PropertyReference): The direct relation property to traverse. - filter (Filter): The filter to apply. + scope: The direct relation property to traverse. + filter: The filter to apply. Example: Assume you have two Views, viewA and viewB. viewA has a direct relation to viewB called "viewB-ID", @@ -473,8 +471,8 @@ class HasData(Filter): """Return only instances that have data in the provided containers/views. Args: - containers (Sequence[tuple[str, str] | ContainerId] | None): Containers to check for data. - views (Sequence[tuple[str, str, str] | ViewId] | None): Views to check for data. + containers: Containers to check for data. + views: Views to check for data. 
Example: @@ -527,11 +525,11 @@ class Range(FilterWithProperty): """Filters results based on a range of values. Args: - property (PropertyReference): The property to filter on. - gt (FilterValue | None): Greater than. - gte (FilterValue | None): Greater than or equal to. - lt (FilterValue | None): Less than. - lte (FilterValue | None): Less than or equal to. + property: The property to filter on. + gt: Greater than. + gte: Greater than or equal to. + lt: Less than. + lte: Less than or equal to. Example: Filter that can be used to retrieve all instances with a property value greater than 42: @@ -591,13 +589,12 @@ class Overlaps(Filter): properties. Args: - start_property (PropertyReference): The property to filter on. - end_property (PropertyReference): The property to filter on. - gt (FilterValue | None): Greater than. - gte (FilterValue | None): Greater than or equal to. - lt (FilterValue | None): Less than. - lte (FilterValue | None): Less than or equal to. - + start_property: The property to filter on. + end_property: The property to filter on. + gt: Greater than. + gte: Greater than or equal to. + lt: Less than. + lte: Less than or equal to. Example: Filter that can be used to retrieve all instances with a range overlapping with (42, 100): @@ -669,8 +666,8 @@ class Equals(FilterWithPropertyAndValue): """Filters results based on whether the property equals the provided value. Args: - property (PropertyReference): The property to filter on. - value (FilterValue): The value to filter on. + property: The property to filter on. + value: The value to filter on. Example: Filter than can be used to retrieve items where the property value equals 42: @@ -705,8 +702,8 @@ class In(FilterWithPropertyAndValueList): >>> 1 in [1,2,3] => true Args: - property (PropertyReference): The property to filter on. - values (FilterValueList): The value(s) to filter on. + property: The property to filter on. + values: The value(s) to filter on. 
Example: Filter than can be used to retrieve items where the property value equals 42 or 43 (or both): @@ -735,7 +732,7 @@ class Exists(FilterWithProperty): """Filters results based on whether the property is set or not. Args: - property (PropertyReference): The property to filter on. + property: The property to filter on. Example: Filter than can be used to retrieve items where the property value is set: @@ -763,8 +760,8 @@ class Prefix(FilterWithPropertyAndValue): is a list, the list starts with the provided values. Args: - property (PropertyReference): The property to filter on. - value (FilterValue): The value to filter on. + property: The property to filter on. + value: The value to filter on. Example: Filter than can be used to retrieve items where the property value starts with "somePrefix": @@ -803,8 +800,8 @@ class ContainsAny(FilterWithPropertyAndValueList): >>> 1 in [1,2,3] => ERROR Args: - property (PropertyReference): The property to filter on. - values (FilterValueList): The value(s) to filter on. + property: The property to filter on. + values: The value(s) to filter on. Example: Filter than can be used to retrieve items where the property value contains either 42 or 43: @@ -827,8 +824,8 @@ class ContainsAll(FilterWithPropertyAndValueList): """Returns results where the referenced property contains _all_ of the provided values. Args: - property (PropertyReference): The property to filter on. - values (FilterValueList): The value to filter on. + property: The property to filter on. + values: The value to filter on. Example: Filter than can be used to retrieve items where the property value contains both 42 and 43: @@ -882,8 +879,8 @@ class InAssetSubtree(FilterWithPropertyAndValueList): that is in the subtree rooted at any of the provided IDs. Args: - property (PropertyReference): The property to filter on, e.g. 'assetId' or 'assetExternalId'. - values (FilterValueList): The value(s) to filter on. + property: The property to filter on, e.g. 
'assetId' or 'assetExternalId'. + values: The value(s) to filter on. Example: @@ -910,7 +907,7 @@ class InstanceReferences(Filter): """Data modeling filter which matches instances with these fully qualified references. Args: - references (Sequence[InstanceId] | Sequence[tuple[str, str]] | Sequence[dict[str, str]]): The instance references. + references: The instance references. Example: Filter than can be used to retrieve instances where their space/externalId matches any of the provided values: @@ -954,8 +951,8 @@ class SpaceFilter(FilterWithProperty): """Filters instances based on the space. Args: - space (str | SequenceNotStr[str]): The space (or spaces) to filter on. - instance_type (Literal['node', 'edge']): Type of instance to filter on. Defaults to "node". + space: The space (or spaces) to filter on. + instance_type: Type of instance to filter on. Defaults to "node". Example: Filter than can be used to retrieve nodes from space "space1" or "space2": @@ -1001,7 +998,7 @@ class IsNull(Not): """Data modeling filter for instances whose property is null, effectively a negated Exists-filter. Args: - property (SequenceNotStr[str]): The property to filter on. + property: The property to filter on. Example: Filter than can be used to retrieve instances where the property value is not set: diff --git a/cognite/client/data_classes/functions.py b/cognite/client/data_classes/functions.py index f1354db943..1a5a9321de 100644 --- a/cognite/client/data_classes/functions.py +++ b/cognite/client/data_classes/functions.py @@ -39,10 +39,10 @@ class FunctionHandle(Protocol): must be named "handle" and can take any of the following named only arguments: Args: - client (CogniteClient | None): Cognite client. - data (dict[str, object] | None): Input data to the function. - secrets (dict[str, str] | None): Secrets passed to the function. - function_call_info (dict[str, object] | None): Function call information. + client: Cognite client. + data: Input data to the function. 
+ secrets: Secrets passed to the function. + function_call_info: Function call information. Example: .. code-block:: python @@ -54,8 +54,8 @@ def handle( # Do something with the data return {"result": "success"} - Returns: - object: Return value of the function. Any JSON serializable object is allowed. + : + Return value of the function. Any JSON serializable object is allowed. """ async def __call__( @@ -70,13 +70,13 @@ async def __call__( """Function handle protocol. Args: - client (CogniteClient | None): Cognite client. - data (dict[str, object] | None): Input data to the function. - secrets (dict[str, str] | None): Secrets passed to the function. - function_call_info (dict[str, object] | None): Function call information. + client: Cognite client. + data: Input data to the function. + secrets: Secrets passed to the function. + function_call_info: Function call information. Returns: - object: Return value of the function. Any JSON serializable object is allowed. + Return value of the function. Any JSON serializable object is allowed. """ ... @@ -85,18 +85,18 @@ class FunctionCore(WriteableCogniteResourceWithClientRef["FunctionWrite"], ABC): """A representation of a Cognite Function. Args: - name (str): Name of the function. - external_id (str | None): External id of the function. - description (str | None): Description of the function. - owner (str | None): Owner of the function. - file_id (int): File id of the code represented by this object. - function_path (str): Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on posix path format. - secrets (dict[str, str] | None): Secrets attached to the function ((key, value) pairs). - env_vars (dict[str, str] | None): User specified environment variables on the function ((key, value) pairs). - cpu (float | None): Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. 
On Azure, only the default value is used. - memory (float | None): Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. - runtime (RunTime | None): Runtime of the function. Allowed values are ["py310", "py311", "py312"]. The runtime "py312" resolves to the latest version of the Python 3.12 series. - metadata (dict[str, str] | None): Metadata associated with a function as a set of key:value pairs. + name: Name of the function. + external_id: External id of the function. + description: Description of the function. + owner: Owner of the function. + file_id: File id of the code represented by this object. + function_path: Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on posix path format. + secrets: Secrets attached to the function ((key, value) pairs). + env_vars: User specified environment variables on the function ((key, value) pairs). + cpu: Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + memory: Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + runtime: Runtime of the function. Allowed values are ["py310", "py311", "py312"]. The runtime "py312" resolves to the latest version of the Python 3.12 series. + metadata: Metadata associated with a function as a set of key:value pairs. """ def __init__( @@ -133,24 +133,24 @@ class Function(FunctionCore): This is the read version, which is used when retrieving a function. Args: - id (int): ID of the function. - created_time (int): Created time in UNIX. - name (str): Name of the function. - external_id (str | None): External id of the function. 
- description (str | None): Description of the function. - owner (str | None): Owner of the function. - status (str): Status of the function. - file_id (int): File id of the code represented by this object. - function_path (str): Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on posix path format. - secrets (dict[str, str] | None): Secrets attached to the function ((key, value) pairs). - env_vars (dict[str, str] | None): User specified environment variables on the function ((key, value) pairs). - cpu (float | None): Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. - memory (float | None): Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. - runtime (RunTime | None): Runtime of the function. Allowed values are ["py310", "py311", "py312"]. The runtime "py312" resolves to the latest version of the Python 3.12 series. - runtime_version (str | None): The complete specification of the function runtime with major, minor and patch version numbers. - metadata (dict[str, str] | None): Metadata associated with a function as a set of key:value pairs. - error (dict | None): Dictionary with keys "message" and "trace", which is populated if deployment fails. - last_called (int | None): Last time the function was called, in UNIX timestamp milliseconds. + id: ID of the function. + created_time: Created time in UNIX. + name: Name of the function. + external_id: External id of the function. + description: Description of the function. + owner: Owner of the function. + status: Status of the function. + file_id: File id of the code represented by this object. 
+ function_path: Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on posix path format. + secrets: Secrets attached to the function ((key, value) pairs). + env_vars: User specified environment variables on the function ((key, value) pairs). + cpu: Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + memory: Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + runtime: Runtime of the function. Allowed values are ["py310", "py311", "py312"]. The runtime "py312" resolves to the latest version of the Python 3.12 series. + runtime_version: The complete specification of the function runtime with major, minor and patch version numbers. + metadata: Metadata associated with a function as a set of key:value pairs. + error: Dictionary with keys "message" and "trace", which is populated if deployment fails. + last_called: Last time the function was called, in UNIX timestamp milliseconds. """ def __init__( @@ -219,7 +219,7 @@ def _load(cls, resource: dict[str, Any]) -> Self: ) def as_write(self) -> FunctionWrite: - """Returns a writeable version of this function.""" + """a writeable version of this function.""" if self.file_id is None or self.name is None: raise ValueError("file_id and name are required to create a function") return FunctionWrite( @@ -241,11 +241,11 @@ async def call_async(self, data: dict[str, object] | None = None, wait: bool = T """`Call this particular function. `_ Args: - data (dict[str, object] | None): Input data to the function (JSON serializable). This data is passed deserialized into the function through one of the arguments called data. 
**WARNING:** Secrets or other confidential information should not be passed via this argument. There is a dedicated `secrets` argument in FunctionsAPI.create() for this purpose. - wait (bool): Wait until the function call is finished. Defaults to True. + data: Input data to the function (JSON serializable). This data is passed deserialized into the function through one of the arguments called data. **WARNING:** Secrets or other confidential information should not be passed via this argument. There is a dedicated `secrets` argument in FunctionsAPI.create() for this purpose. + wait: Wait until the function call is finished. Defaults to True. Returns: - FunctionCall: A function call object. + A function call object. """ return await self._cognite_client.functions.call(id=self.id, data=data, wait=wait) @@ -264,14 +264,14 @@ async def list_calls_async( """List all calls to this function. Args: - status (str | None): Status of the call. Possible values ["Running", "Failed", "Completed", "Timeout"]. - schedule_id (int | None): Schedule id from which the call belongs (if any). - start_time (dict[str, int] | None): Start time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. - end_time (dict[str, int] | None): End time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. - limit (int | None): Maximum number of function calls to list. Pass in -1, float('inf') or None to list all Function Calls. + status: Status of the call. Possible values ["Running", "Failed", "Completed", "Timeout"]. + schedule_id: Schedule id from which the call belongs (if any). + start_time: Start time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. + end_time: End time of the call. Possible keys are `min` and `max`, with values given as time stamps in ms. + limit: Maximum number of function calls to list. Pass in -1, float('inf') or None to list all Function Calls. 
Returns: - FunctionCallList: List of function calls + List of function calls """ return await self._cognite_client.functions.calls.list( function_id=self.id, @@ -305,10 +305,10 @@ async def list_schedules_async(self, limit: int | None = DEFAULT_LIMIT_READ) -> """`List all schedules associated with this function. `_ Args: - limit (int | None): Maximum number of schedules to list. Pass in -1, float('inf') or None to list all. + limit: Maximum number of schedules to list. Pass in -1, float('inf') or None to list all. Returns: - FunctionSchedulesList: List of function schedules + List of function schedules """ return await self._cognite_client.functions.schedules.list(function_id=self.id, limit=limit) @@ -320,10 +320,10 @@ async def retrieve_call_async(self, id: int) -> FunctionCall | None: """`Retrieve call by id. `_ Args: - id (int): ID of the call. + id: ID of the call. Returns: - FunctionCall | None: Requested function call or None if not found. + Requested function call or None if not found. """ return await self._cognite_client.functions.calls.retrieve(call_id=id, function_id=self.id) @@ -353,20 +353,20 @@ class FunctionWrite(FunctionCore): This is the write version, which is used when creating a function. Args: - name (str): Name of the function. - file_id (int): File id of the code represented by this object. - external_id (str | None): External id of the function. - description (str | None): Description of the function. - owner (str | None): Owner of the function. - function_path (str): Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on posix path format. - secrets (dict[str, str] | None): Secrets attached to the function ((key, value) pairs). - env_vars (dict[str, str] | None): User specified environment variables on the function ((key, value) pairs). - cpu (float | None): Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. 
`_, and None translates to the API default. On Azure, only the default value is used. - memory (float | None): Memory per function measured in GB. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. - runtime (RunTime | None): Runtime of the function. Allowed values are ["py310", "py311", "py312"]. The runtime "py312" resolves to the latest version of the Python 3.12 series. - metadata (dict[str, str] | None): Metadata associated with a function as a set of key:value pairs. - index_url (str | None): Specify a different python package index, allowing for packages published in private repositories. Supports basic HTTP authentication as described in pip basic authentication. See the documentation for additional information related to the security risks of using this option. - extra_index_urls (list[str] | None): Extra package index URLs to use when building the function, allowing for packages published in private repositories. Supports basic HTTP authentication as described in pip basic authentication. See the documentation for additional information related to the security risks of using this option. + name: Name of the function. + file_id: File id of the code represented by this object. + external_id: External id of the function. + description: Description of the function. + owner: Owner of the function. + function_path: Relative path from the root folder to the file containing the `handle` function. Defaults to `handler.py`. Must be on posix path format. + secrets: Secrets attached to the function ((key, value) pairs). + env_vars: User specified environment variables on the function ((key, value) pairs). + cpu: Number of CPU cores per function. Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + memory: Memory per function measured in GB. 
Allowed range and default value are given by the `limits endpoint. `_, and None translates to the API default. On Azure, only the default value is used. + runtime: Runtime of the function. Allowed values are ["py310", "py311", "py312"]. The runtime "py312" resolves to the latest version of the Python 3.12 series. + metadata: Metadata associated with a function as a set of key:value pairs. + index_url: Specify a different python package index, allowing for packages published in private repositories. Supports basic HTTP authentication as described in pip basic authentication. See the documentation for additional information related to the security risks of using this option. + extra_index_urls: Extra package index URLs to use when building the function, allowing for packages published in private repositories. Supports basic HTTP authentication as described in pip basic authentication. See the documentation for additional information related to the security risks of using this option. """ def __init__( @@ -423,7 +423,7 @@ def _load(cls, resource: dict[str, Any]) -> FunctionWrite: ) def as_write(self) -> FunctionWrite: - """Returns this FunctionWrite instance.""" + """this FunctionWrite instance.""" return self @@ -465,11 +465,11 @@ class FunctionScheduleCore(WriteableCogniteResourceWithClientRef["FunctionSchedu """A representation of a Cognite Function Schedule. Args: - name (str): Name of the function schedule. - function_id (int | None): Id of the function. - function_external_id (str | None): External id of the function. - description (str | None): Description of the function schedule. - cron_expression (str): Cron expression + name: Name of the function schedule. + function_id: Id of the function. + function_external_id: External id of the function. + description: Description of the function schedule. 
+ cron_expression: Cron expression """ def __init__( @@ -492,15 +492,15 @@ class FunctionSchedule(FunctionScheduleCore): This is the read version, which is used when retrieving a function schedule. Args: - id (int): ID of the schedule. - name (str): Name of the function schedule. - function_id (int | None): ID of the function. - function_external_id (str | None): External id of the function. - description (str | None): Description of the function schedule. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - cron_expression (str): Cron expression - session_id (int): ID of the session running with the schedule. - when (str): When the schedule will trigger, in human readable text (server generated from cron_expression). + id: ID of the schedule. + name: Name of the function schedule. + function_id: ID of the function. + function_external_id: External id of the function. + description: Description of the function schedule. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + cron_expression: Cron expression + session_id: ID of the session running with the schedule. + when: When the schedule will trigger, in human readable text (server generated from cron_expression). """ def __init__( @@ -542,7 +542,7 @@ def _load(cls, resource: dict[str, Any]) -> Self: ) def as_write(self) -> FunctionScheduleWrite: - """Returns a writeable version of this function schedule.""" + """a writeable version of this function schedule.""" if self.cron_expression is None or self.name is None: raise ValueError("cron_expression or name are required to create a FunctionSchedule") @@ -559,8 +559,8 @@ async def get_input_data_async(self) -> dict | None: """ Retrieve the input data to the associated function. - Returns: - dict | None: Input data to the associated function or None if not set. 
This data is passed deserialized into the function through the data argument. + : + Input data to the associated function or None if not set. This data is passed deserialized into the function through the data argument. """ if self.id is None: raise ValueError("FunctionSchedule is missing 'id'") @@ -575,16 +575,13 @@ class FunctionScheduleWrite(FunctionScheduleCore): """A representation of a Cognite Function Schedule. Args: - name (str): Name of the function schedule. - cron_expression (str): Cron expression - function_id (int | None): ID of the function. - function_external_id (str | None): External ID of the function. - description (str | None): Description of the function schedule. - data (dict | None): Input data to the function (only present if provided on the schedule). This data is passed deserialized into the function through one of the arguments called data. WARNING: Secrets or other confidential information should not be passed via the data object. There is a dedicated secrets object in the request body to "Create functions" for this purpose. - nonce (str | None): Nonce retrieved from sessions API when creating a session. This will be used to bind the - session before executing the function. The corresponding access token will be passed to the - function and used to instantiate the client of the handle() function. You can create a session - via the Sessions API. + name: Name of the function schedule. + cron_expression: Cron expression + function_id: ID of the function. + function_external_id: External ID of the function. + description: Description of the function schedule. + data: Input data to the function (only present if provided on the schedule). This data is passed deserialized into the function through one of the arguments called data. WARNING: Secrets or other confidential information should not be passed via the data object. There is a dedicated secrets object in the request body to "Create functions" for this purpose. 
+ nonce: Nonce retrieved from sessions API when creating a session. This will be used to bind the session before executing the function. The corresponding access token will be passed to the function and used to instantiate the client of the handle() function. You can create a session via the Sessions API. """ def __init__( @@ -621,7 +618,7 @@ def _load(cls, resource: dict[str, Any]) -> FunctionScheduleWrite: ) def as_write(self) -> FunctionScheduleWrite: - """Returns this FunctionScheduleWrite instance.""" + """this FunctionScheduleWrite instance.""" return self @@ -652,7 +649,7 @@ class FunctionSchedulesList( _RESOURCE = FunctionSchedule def as_write(self) -> FunctionScheduleWriteList: - """Returns a writeable version of this function schedule.""" + """a writeable version of this function schedule.""" return FunctionScheduleWriteList([f.as_write() for f in self.data]) @@ -664,7 +661,7 @@ class FunctionList(WriteableCogniteResourceList[FunctionWrite, Function], IdTran _RESOURCE = Function def as_write(self) -> FunctionWriteList: - """Returns a writeable version of this function.""" + """a writeable version of this function.""" return FunctionWriteList([f.as_write() for f in self.data]) @@ -672,14 +669,14 @@ class FunctionCall(CogniteResourceWithClientRef): """A representation of a Cognite Function call. Args: - id (int): A server-generated ID for the object. - start_time (int): Start time of the call, measured in number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - end_time (int | None): End time of the call, measured in number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - scheduled_time (int | None): Scheduled time of the call, measured in number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- status (str): Status of the function call ("Running", "Completed" or "Failed"). - schedule_id (int | None): The schedule id belonging to the call. - error (dict | None): Error from the function call. It contains an error message and the stack trace. - function_id (int): No description. + id: A server-generated ID for the object. + start_time: Start time of the call, measured in number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + end_time: End time of the call, measured in number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + scheduled_time: Scheduled time of the call, measured in number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + status: Status of the function call ("Running", "Completed" or "Failed"). + schedule_id: The schedule id belonging to the call. + error: Error from the function call. It contains an error message and the stack trace. + function_id: No description. """ def __init__( @@ -718,8 +715,8 @@ def _load(cls, resource: dict[str, Any]) -> Self: async def get_response_async(self) -> dict[str, object] | None: """Retrieve the response from this function call. - Returns: - dict[str, object] | None: Response from the function call. + : + Response from the function call. """ call_id, function_id = self._get_identifiers_or_raise(self.id, self.function_id) return await self._cognite_client.functions.calls.get_response(call_id=call_id, function_id=function_id) @@ -731,7 +728,7 @@ def get_response(self) -> dict[str, object] | None: async def get_logs_async(self) -> FunctionCallLog: """`Retrieve logs for this function call. `_ - Returns: + : FunctionCallLog: Log for the function call. 
""" call_id, function_id = self._get_identifiers_or_raise(self.id, self.function_id) @@ -782,8 +779,8 @@ class FunctionCallLogEntry(CogniteResource): """A log entry for a function call. Args: - timestamp (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - message (str): Single line from stdout / stderr. + timestamp: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + message: Single line from stdout / stderr. """ def __init__( @@ -814,9 +811,11 @@ def to_text(self, with_timestamps: bool = False) -> str: """Return a new-line delimited string of the log entry messages, optionally with entry timestamps. Args: - with_timestamps (bool): Whether to include entry timestamps in the output. Defaults to False. + with_timestamps: Whether to include entry timestamps in the output. Defaults to False. Returns: - str: new-line delimited log entries. + No description. + : + new-line delimited log entries. """ return "\n".join(entry._format(with_timestamps) for entry in self) @@ -825,11 +824,11 @@ class FunctionsLimits(CogniteResource): """Service limits for the associated project. Args: - timeout_minutes (int): Timeout of each function call. - cpu_cores (dict[str, float]): The number of CPU cores per function execution (i.e. function call). - memory_gb (dict[str, float]): The amount of available memory in GB per function execution (i.e. function call). - runtimes (list[RunTime]): Available runtimes. For example, "py312" translates to the latest version of the Python 3.12 series. - response_size_mb (int | None): Maximum response size of function calls. + timeout_minutes: Timeout of each function call. + cpu_cores: The number of CPU cores per function execution (i.e. function call). + memory_gb: The amount of available memory in GB per function execution (i.e. function call). + runtimes: Available runtimes. 
For example, "py312" translates to the latest version of the Python 3.12 series. + response_size_mb: Maximum response size of function calls. """ def __init__( @@ -861,7 +860,7 @@ class FunctionsStatus(CogniteResource): """Activation Status for the associated project. Args: - status (str): Activation Status for the associated project. + status: Activation Status for the associated project. """ def __init__(self, status: str) -> None: diff --git a/cognite/client/data_classes/geospatial.py b/cognite/client/data_classes/geospatial.py index e4b7fdcf51..86c1787603 100644 --- a/cognite/client/data_classes/geospatial.py +++ b/cognite/client/data_classes/geospatial.py @@ -27,10 +27,10 @@ class FeatureTypeCore(WriteableCogniteResource["FeatureTypeWrite"], ABC): """A representation of a feature type in the geospatial API. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - data_set_id (int | None): The ID of the dataset this feature type belongs to. - properties (dict[str, Any] | None): The properties of the feature type. - search_spec (dict[str, Any] | None): The search spec of the feature type. + external_id: The external ID provided by the client. Must be unique for the resource type. + data_set_id: The ID of the dataset this feature type belongs to. + properties: The properties of the feature type. + search_spec: The search spec of the feature type. """ def __init__( @@ -51,12 +51,12 @@ class FeatureType(FeatureTypeCore): This is the read version of the FeatureType class, it is used when retrieving feature types from the api. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - data_set_id (int | None): The ID of the dataset this feature type belongs to. - created_time (int): The created time of the feature type. - last_updated_time (int): The last updated time of the feature type. - properties (dict[str, Any] | None): The properties of the feature type. 
- search_spec (dict[str, Any] | None): The search spec of the feature type. + external_id: The external ID provided by the client. Must be unique for the resource type. + data_set_id: The ID of the dataset this feature type belongs to. + created_time: The created time of the feature type. + last_updated_time: The last updated time of the feature type. + properties: The properties of the feature type. + search_spec: The search spec of the feature type. """ def __init__( @@ -106,10 +106,10 @@ class FeatureTypeWrite(FeatureTypeCore): This is the write version of the FeatureType class, it is used when creating feature types in the api. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - properties (dict[str, Any]): The properties of the feature type. - data_set_id (int | None): The ID of the dataset this feature type belongs to. - search_spec (dict[str, Any] | None): The search spec of the feature type. + external_id: The external ID provided by the client. Must be unique for the resource type. + properties: The properties of the feature type. + data_set_id: The ID of the dataset this feature type belongs to. + search_spec: The search spec of the feature type. """ def __init__( @@ -210,8 +210,8 @@ class FeatureCore(WriteableCogniteResource["FeatureWrite"], ABC): """A representation of a feature in the geospatial API. Args: - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - **properties (Any): The properties of the feature. + external_id: The external ID provided by the client. Must be unique for the resource type. + **properties: The properties of the feature. """ PRE_DEFINED_SNAKE_CASE_NAMES = frozenset({to_snake_case(key) for key in RESERVED_PROPERTIES}) @@ -243,11 +243,11 @@ class Feature(FeatureCore): This is the read version of the Feature class, it is used when retrieving features from the api. 
Args: - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - created_time (int | None): No description. - last_updated_time (int | None): No description. - data_set_id (int | None): No description. - **properties (Any): The properties of the feature. + external_id: The external ID provided by the client. Must be unique for the resource type. + created_time: No description. + last_updated_time: No description. + data_set_id: No description. + **properties: The properties of the feature. """ def __init__( @@ -294,8 +294,8 @@ class FeatureWrite(FeatureCore): This is the write version of the Feature class, it is used when creating features in the api. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - **properties (Any): The properties of the feature. + external_id: The external ID provided by the client. Must be unique for the resource type. + **properties: The properties of the feature. """ def __init__(self, external_id: str, **properties: Any) -> None: @@ -359,11 +359,11 @@ def to_geopandas(self, geometry: str, camel_case: bool = False) -> geopandas.Geo """Convert the instance into a GeoPandas GeoDataFrame. Args: - geometry (str): The name of the feature type geometry property to use in the GeoDataFrame - camel_case (bool): Convert column names to camel case (e.g. `externalId` instead of `external_id`) + geometry: The name of the feature type geometry property to use in the GeoDataFrame + camel_case: Convert column names to camel case (e.g. `externalId` instead of `external_id`) Returns: - geopandas.GeoDataFrame: The GeoPandas GeoDataFrame. + The GeoPandas GeoDataFrame. Examples: @@ -397,14 +397,14 @@ def from_geopandas( """Convert a GeoDataFrame instance into a FeatureList. 
Args: - feature_type (FeatureType): The feature type the features will conform to - geodataframe (geopandas.GeoDataFrame): the geodataframe instance to convert into features - external_id_column (str): the geodataframe column to use for the feature external id - property_column_mapping (dict[str, str] | None): provides a mapping from featuretype property names to geodataframe columns - data_set_id_column (str): the geodataframe column to use for the feature dataSet id + feature_type: The feature type the features will conform to + geodataframe: the geodataframe instance to convert into features + external_id_column: the geodataframe column to use for the feature external id + property_column_mapping: provides a mapping from featuretype property names to geodataframe columns + data_set_id_column: the geodataframe column to use for the feature dataSet id Returns: - FeatureList: The list of features converted from the geodataframe rows. + The list of features converted from the geodataframe rows. Examples: @@ -504,9 +504,9 @@ class CoordinateReferenceSystemCore(WriteableCogniteResource["CoordinateReferenc """A representation of a feature in the geospatial API. Args: - srid (int): EPSG code, e.g., 4326. Only valid for geometry types. See https://en.wikipedia.org/wiki/Spatial_reference_system - wkt (str): Well-known text of the geometry, see https://docs.geotools.org/stable/javadocs/org/opengis/referencing/doc-files/WKT.html - proj_string (str): The projection specification string as described in https://proj.org/usage/quickstart.html + srid: EPSG code, e.g., 4326. Only valid for geometry types. 
See https://en.wikipedia.org/wiki/Spatial_reference_system + wkt: Well-known text of the geometry, see https://docs.geotools.org/stable/javadocs/org/opengis/referencing/doc-files/WKT.html + proj_string: The projection specification string as described in https://proj.org/usage/quickstart.html """ def __init__(self, srid: int, wkt: str, proj_string: str) -> None: @@ -520,9 +520,9 @@ class CoordinateReferenceSystem(CoordinateReferenceSystemCore): This is the read version of the CoordinateReferenceSystem class, it is used when retrieving from the CDF. Args: - srid (int): EPSG code, e.g., 4326. Only valid for geometry types. See https://en.wikipedia.org/wiki/Spatial_reference_system - wkt (str): Well-known text of the geometry, see https://docs.geotools.org/stable/javadocs/org/opengis/referencing/doc-files/WKT.html - proj_string (str): The projection specification string as described in https://proj.org/usage/quickstart.html + srid: EPSG code, e.g., 4326. Only valid for geometry types. See https://en.wikipedia.org/wiki/Spatial_reference_system + wkt: Well-known text of the geometry, see https://docs.geotools.org/stable/javadocs/org/opengis/referencing/doc-files/WKT.html + proj_string: The projection specification string as described in https://proj.org/usage/quickstart.html """ def __init__( @@ -553,9 +553,9 @@ class CoordinateReferenceSystemWrite(CoordinateReferenceSystemCore): """A representation of a feature in the geospatial API. Args: - srid (int): EPSG code, e.g., 4326. Only valid for geometry types. See https://en.wikipedia.org/wiki/Spatial_reference_system - wkt (str): Well-known text of the geometry, see https://docs.geotools.org/stable/javadocs/org/opengis/referencing/doc-files/WKT.html - proj_string (str): The projection specification string as described in https://proj.org/usage/quickstart.html + srid: EPSG code, e.g., 4326. Only valid for geometry types. 
See https://en.wikipedia.org/wiki/Spatial_reference_system + wkt: Well-known text of the geometry, see https://docs.geotools.org/stable/javadocs/org/opengis/referencing/doc-files/WKT.html + proj_string: The projection specification string as described in https://proj.org/usage/quickstart.html """ def __init__(self, srid: int, wkt: str, proj_string: str) -> None: @@ -640,7 +640,7 @@ class GeospatialGeometryValueComputeFunction(GeospatialGeometryComputeFunction): see https://docs.geotools.org/stable/javadocs/org/opengis/referencing/doc-files/WKT.html Args: - ewkt (str): No description. + ewkt: No description. """ def __init__(self, ewkt: str) -> None: diff --git a/cognite/client/data_classes/hosted_extractors/destinations.py b/cognite/client/data_classes/hosted_extractors/destinations.py index 716b65f1de..565f7dd77f 100644 --- a/cognite/client/data_classes/hosted_extractors/destinations.py +++ b/cognite/client/data_classes/hosted_extractors/destinations.py @@ -44,9 +44,9 @@ class DestinationWrite(_DestinationCore): This is the write/request format of the destination. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - credentials (SessionWrite): Credentials for authenticating towards CDF using a CDF session. - target_data_set_id (int | None): Data set ID the created items are inserted into, if applicable. + external_id: The external ID provided by the client. Must be unique for the resource type. + credentials: Credentials for authenticating towards CDF using a CDF session. + target_data_set_id: Data set ID the created items are inserted into, if applicable. """ @@ -84,11 +84,11 @@ class Destination(_DestinationCore): This is the write/request format of the destination. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. 
- created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - session_id (int | None): ID of the session tied to this destination. - target_data_set_id (int | None): Data set ID the created items are inserted into, if applicable. + external_id: The external ID provided by the client. Must be unique for the resource type. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + session_id: ID of the session tied to this destination. + target_data_set_id: Data set ID the created items are inserted into, if applicable. """ diff --git a/cognite/client/data_classes/hosted_extractors/jobs.py b/cognite/client/data_classes/hosted_extractors/jobs.py index a532bb1216..5f66f6313d 100644 --- a/cognite/client/data_classes/hosted_extractors/jobs.py +++ b/cognite/client/data_classes/hosted_extractors/jobs.py @@ -324,9 +324,9 @@ class JobWrite(_JobCore): This is the write/request format of the job. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - destination_id (str): ID of the destination this job should write to. - source_id (str): ID of the source this job should read from. + external_id: The external ID provided by the client. Must be unique for the resource type. + destination_id: ID of the destination this job should write to. + source_id: ID of the source this job should read from. format: The format of the messages from the source. This is used to convert messages coming from the source system to a format that can be inserted into CDF. 
config: Configuration for the job. This must match the source. For example, if the source is MQTT, @@ -356,15 +356,15 @@ class Job(_JobCore): This is the read/response format of the job. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - destination_id (str): ID of the destination this job should write to. - source_id (str): ID of the source this job should read from. - format (JobFormat): The format of the messages from the source. This is used to convert messages coming from the source system to a format that can be inserted into CDF. - target_status (TargetStatus): The target status of a job. Set this to start or stop the job. - status (JobStatus): Status of this job. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - config (JobConfig | None): Configuration for the job. This is specific to the source system. + external_id: The external ID provided by the client. Must be unique for the resource type. + destination_id: ID of the destination this job should write to. + source_id: ID of the source this job should read from. + format: The format of the messages from the source. This is used to convert messages coming from the source system to a format that can be inserted into CDF. + target_status: The target status of a job. Set this to start or stop the job. + status: Status of this job. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + config: Configuration for the job. This is specific to the source system. 
""" @@ -476,10 +476,10 @@ class JobLogs(CogniteResource): """Logs for a hosted extractor job. Args: - job_external_id (str): The external ID of the job. - type (Literal['paused', 'startup_error', 'connection_error', 'connected', 'transform_error', 'cdf_write_error', 'ok']): Type of log entry. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - message (str | None): Log message. Not all log entries have messages. + job_external_id: The external ID of the job. + type: Type of log entry. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + message: Log message. Not all log entries have messages. Statuses @@ -521,16 +521,16 @@ class JobMetrics(CogniteResource): """Metrics for a hosted extractor job. Args: - job_external_id (str): External ID of the job this metrics batch belongs to. - timestamp (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Metrics are from the UTC hour this timestamp is ingest. For example, if this timestamp is at 01:43:15, the metrics batch contains metrics from 01:00:00 to 01:43:15. - source_messages (int): Number of messages received from the source system. - cdf_input_values (int): Destination resources successfully transformed and passed to CDF. - cdf_requests (int): Requests made to CDF containing data produced by this job. - transform_failures (int): Source messages that failed to transform. - cdf_write_failures (int): Times the destination received data from transformations, but failed to produce a valid request to CDF. - cdf_skipped_values (int): Values the destination received from the source, then decided to skip due to data type mismatch, invalid content, or other. - cdf_failed_values (int): Values the destination was unable to upload to CDF. 
- cdf_uploaded_values (int): Values the destination successfully uploaded to CDF. + job_external_id: External ID of the job this metrics batch belongs to. + timestamp: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. Metrics are from the UTC hour this timestamp is in. For example, if this timestamp is at 01:43:15, the metrics batch contains metrics from 01:00:00 to 01:43:15. + source_messages: Number of messages received from the source system. + cdf_input_values: Destination resources successfully transformed and passed to CDF. + cdf_requests: Requests made to CDF containing data produced by this job. + transform_failures: Source messages that failed to transform. + cdf_write_failures: Times the destination received data from transformations, but failed to produce a valid request to CDF. + cdf_skipped_values: Values the destination received from the source, then decided to skip due to data type mismatch, invalid content, or other. + cdf_failed_values: Values the destination was unable to upload to CDF. + cdf_uploaded_values: Values the destination successfully uploaded to CDF. """ diff --git a/cognite/client/data_classes/hosted_extractors/mappings.py b/cognite/client/data_classes/hosted_extractors/mappings.py index f71021429b..a9d978b289 100644 --- a/cognite/client/data_classes/hosted_extractors/mappings.py +++ b/cognite/client/data_classes/hosted_extractors/mappings.py @@ -141,10 +141,10 @@ class MappingWrite(_MappingCore): This is the write/request format of a mapping. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - mapping (CustomMapping): The custom mapping. - published (bool): Whether this mapping is published and should be available to be used in jobs. - input (InputMapping | Literal['csv', 'json', 'xml']): The input mapping. Defaults to 'json' + external_id: The external ID provided by the client. 
Must be unique for the resource type. + mapping: The custom mapping. + published: Whether this mapping is published and should be available to be used in jobs. + input: The input mapping. Defaults to 'json' """ def __init__( @@ -189,12 +189,12 @@ class Mapping(_MappingCore): This is the read/response format of a mapping. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - mapping (CustomMapping): The custom mapping. - published (bool): Whether this mapping is published and should be available to be used in jobs. - input (InputMapping): The input mapping. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + external_id: The external ID provided by the client. Must be unique for the resource type. + mapping: The custom mapping. + published: Whether this mapping is published and should be available to be used in jobs. + input: The input mapping. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. """ def __init__( diff --git a/cognite/client/data_classes/hosted_extractors/sources.py b/cognite/client/data_classes/hosted_extractors/sources.py index c57b11e8bd..320eafc198 100644 --- a/cognite/client/data_classes/hosted_extractors/sources.py +++ b/cognite/client/data_classes/hosted_extractors/sources.py @@ -30,7 +30,7 @@ class SourceWrite(CogniteResource, ABC): This is the write/request format of the source resource. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. 
+ external_id: The external ID provided by the client. Must be unique for the resource type. """ _type: ClassVar[str] @@ -70,7 +70,7 @@ class Source(WriteableCogniteResource[T_WriteClass], ABC): This is the read/response format of the source resource. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. + external_id: The external ID provided by the client. Must be unique for the resource type. """ _type: ClassVar[str] @@ -138,12 +138,12 @@ class EventHubSourceWrite(SourceWrite): This is the write/request format of the source resource. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - host (str): URL of the event hub consumer endpoint. - event_hub_name (str): Name of the event hub - key_name (str): The name of the Event Hub key to use. - key_value (str): Value of the Event Hub key to use for authentication. - consumer_group (str | None): The event hub consumer group to use. Microsoft recommends having a distinct consumer group for each application consuming data from event hub. If left out, this uses the default consumer group. + external_id: The external ID provided by the client. Must be unique for the resource type. + host: URL of the event hub consumer endpoint. + event_hub_name: Name of the event hub + key_name: The name of the Event Hub key to use. + key_value: Value of the Event Hub key to use for authentication. + consumer_group: The event hub consumer group to use. Microsoft recommends having a distinct consumer group for each application consuming data from event hub. If left out, this uses the default consumer group. """ _type = "eventhub" @@ -187,13 +187,13 @@ class EventHubSource(Source): This is the read/response format of the source resource. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - host (str): URL of the event hub consumer endpoint. 
- event_hub_name (str): Name of the event hub - key_name (str): The name of the Event Hub key to use. - created_time (int): No description. - last_updated_time (int): No description. - consumer_group (str | None): The event hub consumer group to use. Microsoft recommends having a distinct consumer group for each application consuming data from event hub. If left out, this uses the default consumer group. + external_id: The external ID provided by the client. Must be unique for the resource type. + host: URL of the event hub consumer endpoint. + event_hub_name: Name of the event hub + key_name: The name of the Event Hub key to use. + created_time: No description. + last_updated_time: No description. + consumer_group: The event hub consumer group to use. Microsoft recommends having a distinct consumer group for each application consuming data from event hub. If left out, this uses the default consumer group. """ _type = "eventhub" @@ -476,12 +476,12 @@ class KafkaSourceWrite(SourceWrite): This is the write/request format of the kafka resource. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - bootstrap_brokers (Sequence[KafkaBroker]): List of redundant kafka brokers to connect to. - authentication (AuthenticationWrite | None): Authentication information for the kafka source. - use_tls (bool): If true, use TLS when connecting to the broker. - ca_certificate (CACertificateWrite | None): Custom certificate authority certificate to let the source use a self signed certificate. - auth_certificate (AuthCertificateWrite | None): Authentication certificate (if configured) used to authenticate to source. + external_id: The external ID provided by the client. Must be unique for the resource type. + bootstrap_brokers: List of redundant kafka brokers to connect to. + authentication: Authentication information for the kafka source. + use_tls: If true, use TLS when connecting to the broker. 
+ ca_certificate: Custom certificate authority certificate to let the source use a self signed certificate. + auth_certificate: Authentication certificate (if configured) used to authenticate to source. """ _type = "kafka" @@ -535,14 +535,14 @@ class KafkaSource(Source): This is the read/response format of the kafka resource. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - bootstrap_brokers (Sequence[KafkaBroker]): List of redundant kafka brokers to connect to. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - authentication (Authentication | None): Authentication information for the kafka source. - use_tls (bool): If true, use TLS when connecting to the broker. - ca_certificate (CACertificate | None): Custom certificate authority certificate to let the source use a self signed certificate. - auth_certificate (AuthCertificate | None): Authentication certificate (if configured) used to authenticate to source. + external_id: The external ID provided by the client. Must be unique for the resource type. + bootstrap_brokers: List of redundant kafka brokers to connect to. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + authentication: Authentication information for the kafka source. + use_tls: If true, use TLS when connecting to the broker. + ca_certificate: Custom certificate authority certificate to let the source use a self signed certificate. + auth_certificate: Authentication certificate (if configured) used to authenticate to source. 
""" _type = "kafka" @@ -659,12 +659,12 @@ class RestSourceWrite(SourceWrite): This is the write/request format of the rest resource. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - host (str): Host or IP address to connect to. - scheme (Literal['http', 'https']): Type of connection to establish. - port (int | None): Port on server to connect to. Uses default ports based on the scheme if omitted. - ca_certificate (CACertificateWrite | None): Custom certificate authority certificate to let the source use a self signed certificate. - authentication (AuthenticationWrite | None): Authentication details for source. + external_id: The external ID provided by the client. Must be unique for the resource type. + host: Host or IP address to connect to. + scheme: Type of connection to establish. + port: Port on server to connect to. Uses default ports based on the scheme if omitted. + ca_certificate: Custom certificate authority certificate to let the source use a self signed certificate. + authentication: Authentication details for source. """ _type = "rest" @@ -717,14 +717,14 @@ class RestSource(Source): This is the read/response format of the rest resource. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - host (str): Host or IP address to connect to. - scheme (Literal['http', 'https']): Type of connection to establish. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - port (int | None): Port on server to connect to. Uses default ports based on the scheme if omitted. - ca_certificate (CACertificate | None): Custom certificate authority certificate to let the source use a self signed certificate. 
- authentication (Authentication | None): Authentication details for source. + external_id: The external ID provided by the client. Must be unique for the resource type. + host: Host or IP address to connect to. + scheme: Type of connection to establish. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + port: Port on server to connect to. Uses default ports based on the scheme if omitted. + ca_certificate: Custom certificate authority certificate to let the source use a self signed certificate. + authentication: Authentication details for source. """ _type = "rest" diff --git a/cognite/client/data_classes/iam.py b/cognite/client/data_classes/iam.py index a6b3ffa455..e755c2c5d2 100644 --- a/cognite/client/data_classes/iam.py +++ b/cognite/client/data_classes/iam.py @@ -65,12 +65,12 @@ class GroupCore(WriteableCogniteResource["GroupWrite"], ABC): """No description. Args: - name (str): Name of the group. - source_id (str | None): ID of the group in the source. If this is the same ID as a group in the IdP, a service account in that group will implicitly be a part of this group as well. Can not be used together with 'members'. - capabilities (list[Capability] | Capability | None): List of capabilities (acls) this group should grant its users. - attributes (GroupAttributes | None): Attributes of the group, this scopes down access based on the attributes specified. - metadata (dict[str, str] | None): Custom, immutable application specific metadata. String key -> String value. Limits: Key are at most 32 bytes. Values are at most 512 bytes. Up to 16 key-value pairs. Total size is at most 4096. - members (Literal['allUserAccounts'] | list[str] | None): Specifies which users are members of the group. Can not be used together with 'source_id'. 
+ name: Name of the group. + source_id: ID of the group in the source. If this is the same ID as a group in the IdP, a service account in that group will implicitly be a part of this group as well. Can not be used together with 'members'. + capabilities: List of capabilities (acls) this group should grant its users. + attributes: Attributes of the group, this scopes down access based on the attributes specified. + metadata: Custom, immutable application specific metadata. String key -> String value. Limits: Key are at most 32 bytes. Values are at most 512 bytes. Up to 16 key-value pairs. Total size is at most 4096. + members: Specifies which users are members of the group. Can not be used together with 'source_id'. """ def __init__( @@ -119,15 +119,15 @@ class Group(GroupCore): Groups can either be managed through the external identity provider for the project or managed by CDF. Args: - id (int): No description. - name (str): Name of the group. - source_id (str | None): ID of the group in the source. If this is the same ID as a group in the IdP, a service account in that group will implicitly be a part of this group as well. Can not be used together with 'members'. - capabilities (list[Capability] | Capability | None): List of capabilities (acls) this group should grant its users. - attributes (GroupAttributes | None): Attributes of the group, this scopes down access based on the attributes specified. - is_deleted (bool | None): No description. - deleted_time (int | None): No description. - metadata (dict[str, str] | None): Custom, immutable application specific metadata. String key -> String value. Limits: Key are at most 32 bytes. Values are at most 512 bytes. Up to 16 key-value pairs. Total size is at most 4096. - members (Literal['allUserAccounts'] | list[str] | None): Specifies which users are members of the group. Can not be used together with 'source_id'. + id: No description. + name: Name of the group. + source_id: ID of the group in the source. 
If this is the same ID as a group in the IdP, a service account in that group will implicitly be a part of this group as well. Can not be used together with 'members'. + capabilities: List of capabilities (acls) this group should grant its users. + attributes: Attributes of the group, this scopes down access based on the attributes specified. + is_deleted: No description. + deleted_time: No description. + metadata: Custom, immutable application specific metadata. String key -> String value. Limits: Key are at most 32 bytes. Values are at most 512 bytes. Up to 16 key-value pairs. Total size is at most 4096. + members: Specifies which users are members of the group. Can not be used together with 'source_id'. """ def __init__( @@ -217,12 +217,12 @@ class GroupWrite(GroupCore): Groups can either be managed through the external identity provider for the project or managed by CDF. Args: - name (str): Name of the group. - source_id (str | None): ID of the group in the source. If this is the same ID as a group in the IdP, a service account in that group will implicitly be a part of this group as well. Can not be used together with 'members'. - capabilities (list[Capability] | None): List of capabilities (acls) this group should grant its users. - attributes (GroupAttributes | None): Attributes of the group, this scopes down access based on the attributes specified. - metadata (dict[str, str] | None): Custom, immutable application specific metadata. String key -> String value. Limits: Key are at most 32 bytes. Values are at most 512 bytes. Up to 16 key-value pairs. Total size is at most 4096. - members (Literal['allUserAccounts'] | list[str] | None): Specifies which users are members of the group. Can not be used together with 'source_id'. + name: Name of the group. + source_id: ID of the group in the source. If this is the same ID as a group in the IdP, a service account in that group will implicitly be a part of this group as well. Can not be used together with 'members'. 
+ capabilities: List of capabilities (acls) this group should grant its users. + attributes: Attributes of the group, this scopes down access based on the attributes specified. + metadata: Custom, immutable application specific metadata. String key -> String value. Limits: Key are at most 32 bytes. Values are at most 512 bytes. Up to 16 key-value pairs. Total size is at most 4096. + members: Specifies which users are members of the group. Can not be used together with 'source_id'. """ def __init__( @@ -297,7 +297,7 @@ class SecurityCategoryCore(WriteableCogniteResource["SecurityCategoryWrite"], AB """No description. Args: - name (str | None): Name of the security category + name: Name of the security category """ def __init__(self, name: str | None = None) -> None: @@ -309,8 +309,8 @@ class SecurityCategory(SecurityCategoryCore): This is the read version of a security category, which is used when retrieving security categories. Args: - id (int): Id of the security category - name (str | None): Name of the security category + id: Id of the security category + name: Name of the security category """ def __init__(self, id: int, name: str | None) -> None: @@ -334,7 +334,7 @@ class SecurityCategoryWrite(SecurityCategoryCore): Args: - name (str): Name of the security category + name: Name of the security category """ def __init__(self, name: str) -> None: @@ -369,8 +369,8 @@ class ProjectSpec(CogniteResource): """A CDF project spec Args: - url_name (str): The url name for the project - groups (list[int]): Group ids in the project + url_name: The url name for the project + groups: Group ids in the project """ def __init__(self, url_name: str, groups: list[int]) -> None: @@ -396,9 +396,9 @@ class TokenInspection(CogniteResource): """Current login status Args: - subject (str): Subject (sub claim) of JWT. - projects (list[ProjectSpec]): Projects this token is valid for. - capabilities (ProjectCapabilityList): Capabilities associated with this token. 
+ subject: Subject (sub claim) of JWT. + projects: Projects this token is valid for. + capabilities: Capabilities associated with this token. """ def __init__(self, subject: str, projects: list[ProjectSpec], capabilities: ProjectCapabilityList) -> None: @@ -449,11 +449,11 @@ class CreatedSession(CogniteResource): """Session creation related information Args: - id (int): ID of the created session. - status (SessionStatus): Current status of the session. - nonce (str): Nonce to be passed to the internal service that will bind the session - type (SessionType | None): Credentials kind used to create the session. - client_id (str | None): Client ID in identity provider. Returned only if the session was created using client credentials + id: ID of the created session. + status: Current status of the session. + nonce: Nonce to be passed to the internal service that will bind the session + type: Credentials kind used to create the session. + client_id: Client ID in identity provider. Returned only if the session was created using client credentials """ def __init__( @@ -485,12 +485,12 @@ class Session(CogniteResource): """Session status Args: - id (int): ID of the session. - type (SessionType): Credentials kind used to create the session. - status (SessionStatus): Current status of the session. - creation_time (int): Session creation time, in milliseconds since 1970 - expiration_time (int): Session expiry time, in milliseconds since 1970. This value is updated on refreshing a token - client_id (str | None): Client ID in identity provider. Returned only if the session was created using client credentials + id: ID of the session. + type: Credentials kind used to create the session. + status: Current status of the session. + creation_time: Session creation time, in milliseconds since 1970 + expiration_time: Session expiry time, in milliseconds since 1970. This value is updated on refreshing a token + client_id: Client ID in identity provider. 
Returned only if the session was created using client credentials """ def __init__( @@ -529,8 +529,8 @@ class ClientCredentials(CogniteResource): """Client credentials for session creation Args: - client_id (str): Client ID from identity provider. - client_secret (str): Client secret from identity provider. + client_id: Client ID from identity provider. + client_secret: Client secret from identity provider. """ def __init__(self, client_id: str, client_secret: str) -> None: diff --git a/cognite/client/data_classes/labels.py b/cognite/client/data_classes/labels.py index ddefd754b0..7e96d989f6 100644 --- a/cognite/client/data_classes/labels.py +++ b/cognite/client/data_classes/labels.py @@ -22,10 +22,10 @@ class LabelDefinitionCore(WriteableCogniteResource["LabelDefinitionWrite"], ABC) This is the parent for the reading and write versions. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str | None): Name of the label. - description (str | None): Description of the label. - data_set_id (int | None): The id of the dataset this label belongs to. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: Name of the label. + description: Description of the label. + data_set_id: The id of the dataset this label belongs to. """ def __init__( @@ -46,11 +46,11 @@ class LabelDefinition(LabelDefinitionCore): This is the read version of the LabelDefinition class. It is used when retrieving existing label definitions. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str): Name of the label. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - description (str | None): Description of the label. - data_set_id (int | None): The id of the dataset this label belongs to. + external_id: The external ID provided by the client. 
Must be unique for the resource type. + name: Name of the label. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + description: Description of the label. + data_set_id: The id of the dataset this label belongs to. """ def __init__( @@ -96,10 +96,10 @@ class LabelDefinitionWrite(LabelDefinitionCore): This is the write version of the LabelDefinition class. It is used when creating new label definitions. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str): Name of the label. - description (str | None): Description of the label. - data_set_id (int | None): The id of the dataset this label belongs to. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: Name of the label. + description: Description of the label. + data_set_id: The id of the dataset this label belongs to. """ def __init__( @@ -134,9 +134,9 @@ class LabelDefinitionFilter(CogniteFilter): """Filter on labels definitions with strict matching. Args: - name (str | None): Returns the label definitions matching that name. - external_id_prefix (str | None): filter label definitions with external ids starting with the prefix specified - data_set_ids (list[dict[str, Any]] | None): Only include labels that belong to these datasets. + name: Returns the label definitions matching that name. + external_id_prefix: filter label definitions with external ids starting with the prefix specified + data_set_ids: Only include labels that belong to these datasets. """ def __init__( @@ -168,7 +168,7 @@ class Label(CogniteResource): """A label assigned to a resource. Args: - external_id (str): The external id to the attached label. + external_id: The external id to the attached label. 
""" def __init__(self, external_id: str) -> None: @@ -208,8 +208,8 @@ class LabelFilter(CogniteFilter): """Return only the resource matching the specified label constraints. Args: - contains_any (list[str] | None): The resource item contains at least one of the listed labels. The labels are defined by a list of external ids. - contains_all (list[str] | None): The resource item contains all the listed labels. The labels are defined by a list of external ids. + contains_any: The resource item contains at least one of the listed labels. The labels are defined by a list of external ids. + contains_all: The resource item contains all the listed labels. The labels are defined by a list of external ids. Examples: diff --git a/cognite/client/data_classes/limits.py b/cognite/client/data_classes/limits.py index 8526923bf7..df6177fe5f 100644 --- a/cognite/client/data_classes/limits.py +++ b/cognite/client/data_classes/limits.py @@ -13,8 +13,8 @@ class Limit(CogniteResource): Service and limit names are always in `lower_snake_case`. Args: - limit_id (str): Limits are identified by an id containing the service name and a service-scoped limit name. - value (float | int): The numeric value of the limit. + limit_id: Limits are identified by an id containing the service name and a service-scoped limit name. + value: The numeric value of the limit. """ def __init__(self, limit_id: str, value: float | int) -> None: diff --git a/cognite/client/data_classes/postgres_gateway/tables.py b/cognite/client/data_classes/postgres_gateway/tables.py index d7fcb81abc..bfed606649 100644 --- a/cognite/client/data_classes/postgres_gateway/tables.py +++ b/cognite/client/data_classes/postgres_gateway/tables.py @@ -129,9 +129,9 @@ class RawTableWrite(TableWrite): This is the read/response format of the raw table. Args: - tablename (str): Name of the foreign table. - options (RawTableOptions): Table options - columns (Sequence[Column] | ColumnList): Foreign table columns. 
+ tablename: Name of the foreign table. + options: Table options + columns: Foreign table columns. """ @@ -163,8 +163,8 @@ class ViewTableWrite(TableWrite): This is the read/response format of the custom table. Args: - tablename (str): Name of the foreign table. - options (ViewId): Table options + tablename: Name of the foreign table. + options: Table options """ _type = "view" @@ -192,8 +192,8 @@ class Table(_TableCore, ABC): This is the read/response format of the custom table. Args: - tablename (str): Name of the foreign table. - created_time (int | None): Time when the table was created + tablename: Name of the foreign table. + created_time: Time when the table was created """ @@ -225,10 +225,10 @@ class RawTable(Table): This is the read/response format of the raw table. Args: - tablename (str): Name of the foreign table. - options (RawTableOptions): Table options - columns (ColumnList): Foreign table columns. - created_time (int | None): Time when the table was created. + tablename: Name of the foreign table. + options: Table options + columns: Foreign table columns. + created_time: Time when the table was created. """ @@ -270,9 +270,9 @@ class ViewTable(Table): This is the read/response format of the custom table. Args: - tablename (str): Name of the foreign table. - options (ViewId): Table options - created_time (int | None): Time when the table was created. + tablename: Name of the foreign table. + options: Table options + created_time: Time when the table was created. """ _type = "view" diff --git a/cognite/client/data_classes/postgres_gateway/users.py b/cognite/client/data_classes/postgres_gateway/users.py index 77467a9c9b..1124277a4e 100644 --- a/cognite/client/data_classes/postgres_gateway/users.py +++ b/cognite/client/data_classes/postgres_gateway/users.py @@ -38,7 +38,7 @@ class UserWrite(_UserCore): This is the write/request format of the user. 
Args: - credentials (SessionCredentials | None): Credentials for authenticating towards CDF using a CDF session. + credentials: Credentials for authenticating towards CDF using a CDF session. """ @@ -68,10 +68,10 @@ class User(_UserCore): This is the read/response format of the user for list and retrieve endpoints. Args: - username (str): Username to authenticate the user on the DB. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - session_id (int | None): ID of the session tied to this user. + username: Username to authenticate the user on the DB. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + session_id: ID of the session tied to this user. """ @@ -106,12 +106,12 @@ class UserCreated(User): This is the read/response format of the user for the create endpoint Args: - host(str): Host of the DB. - username (str): Username to authenticate the user on the DB. - password (str): Password to authenticate the user on the DB. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - session_id (int | None): ID of the session tied to this user. + host: Host of the DB. + username: Username to authenticate the user on the DB. + password: Password to authenticate the user on the DB. 
+ created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + session_id: ID of the session tied to this user. """ @@ -166,9 +166,9 @@ def dump(self, camel_case: Literal[True] = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (Literal[True]): No description. + camel_case: No description. Returns: - dict[str, Any]: A dictionary representation of the instance. + A dictionary representation of the instance. """ return {"update": self._update_object, "username": self.username} diff --git a/cognite/client/data_classes/principals.py b/cognite/client/data_classes/principals.py index dd7e5a9a21..1ec951a2cd 100644 --- a/cognite/client/data_classes/principals.py +++ b/cognite/client/data_classes/principals.py @@ -52,14 +52,14 @@ class UserPrincipal(Principal): """Represents a user principal in Cognite Data Fusion (CDF). Arguments: - id (str): The ID of an organization user - name (str): Human-readable name of the principal - picture_url (str): URL to a picture of the principal - email (str | None): User email. Do not use this to uniquely identify a user, as it can be changed + id: The ID of an organization user + name: Human-readable name of the principal + picture_url: URL to a picture of the principal + email: User email. Do not use this to uniquely identify a user, as it can be changed and is not guaranteed to be unique. Use the id field instead. 
- given_name (str | None): The given name of the user - middle_name (str | None): The middle name of the user - family_name (str | None): The family name of the user + given_name: The given name of the user + middle_name: The middle name of the user + family_name: The family name of the user """ @@ -101,8 +101,8 @@ class ServiceAccountCreator(CogniteResource): """The creator of a service account. Arguments: - org_id (str): The ID of an organization. - user_id (str): The ID of an organization user + org_id: The ID of an organization. + user_id: The ID of an organization user """ @@ -118,16 +118,16 @@ class ServicePrincipal(Principal): """Represents a service account principal in Cognite Data Fusion (CDF). Arguments: - id (str): Unique identifier of a service account - name (str): Human-readable name of the service account - created_by (ServiceAccountCreator): The creator of the service account - created_time (int): When the principal was created. It is given as the number of milliseconds + id: Unique identifier of a service account + name: Human-readable name of the service account + created_by: The creator of the service account + created_time: When the principal was created. It is given as the number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): When the principal was last updated. It is given as the number of milliseconds + last_updated_time: When the principal was last updated. It is given as the number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - picture_url (str): URL to a picture of the principal. - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - description (str | None): A description of the service account. + picture_url: URL to a picture of the principal. + external_id: The external ID provided by the client. 
Must be unique for the resource type. + description: A description of the service account. """ @@ -179,9 +179,9 @@ class UnknownPrincipal(Principal): Typically, this can happen when a new type of principal is introduced in CDF that is not yet supported by the SDK. Arguments: - id (str): Unique identifier of the principal. - type (str): The type of the principal, which is not recognized by the SDK. - data (dict[str, Any]): Additional data associated with the principal, excluding the 'id' and 'type' fields. + id: Unique identifier of the principal. + type: The type of the principal, which is not recognized by the SDK. + data: Additional data associated with the principal, excluding the 'id' and 'type' fields. """ diff --git a/cognite/client/data_classes/raw.py b/cognite/client/data_classes/raw.py index ba5d83efe7..bd1b854f8c 100644 --- a/cognite/client/data_classes/raw.py +++ b/cognite/client/data_classes/raw.py @@ -24,8 +24,8 @@ class RowCore(WriteableCogniteResource["RowWrite"], ABC): """No description. Args: - key (str): Unique row key - columns (dict[str, Any]): Row data stored as a JSON object. + key: Unique row key + columns: Row data stored as a JSON object. """ def __init__( @@ -61,7 +61,7 @@ def to_pandas(self) -> pandas.DataFrame: # type: ignore[override] """Convert the instance into a pandas DataFrame. Returns: - pandas.DataFrame: The pandas DataFrame representing this instance. + The pandas DataFrame representing this instance. """ pd = local_import("pandas") return pd.DataFrame([self.columns], [self.key]) @@ -75,9 +75,9 @@ class Row(RowCore): This is the read version of the Row class, which is used when retrieving a row. Args: - key (str): Unique row key - columns (dict[str, Any]): Row data stored as a JSON object. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + key: Unique row key + columns: Row data stored as a JSON object. 
+ last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. """ def __init__( @@ -109,8 +109,8 @@ class RowWrite(RowCore): This is the write version of the Row class, which is used when creating a row. Args: - key (str): Unique row key - columns (dict[str, Any]): Row data stored as a JSON object. + key: Unique row key + columns: Row data stored as a JSON object. """ def __init__(self, key: str, columns: dict[str, Any]) -> None: @@ -130,7 +130,7 @@ def to_pandas(self) -> pandas.DataFrame: # type: ignore[override] """Convert the instance into a pandas DataFrame. Returns: - pandas.DataFrame: The pandas DataFrame representing this instance. + The pandas DataFrame representing this instance. """ pd = local_import("pandas") if not self: @@ -159,8 +159,8 @@ class Table(WriteableCogniteResourceWithClientRef["TableWrite"]): This is the read version of the Table class, which is used when retrieving a table. Args: - name (str): Unique name of the table - created_time (int | None): Time the table was created. + name: Unique name of the table + created_time: Time the table was created. """ def __init__( @@ -193,11 +193,11 @@ async def rows_async(self, key: str | None = None, limit: int | None = None) -> """Get the rows in this table. Args: - key (str | None): Specify a key to return only that row. - limit (int | None): The number of rows to return. + key: Specify a key to return only that row. + limit: The number of rows to return. Returns: - Row | RowList | None: List of tables in this database. + List of tables in this database. """ if self._db_name is None: raise ValueError("Table is not linked to a database, did you instantiate it yourself?") @@ -224,7 +224,7 @@ class TableWrite(WriteableCogniteResource["TableWrite"]): This is the write version of the Table class, which is used when creating a table. 
Args: - name (str): Unique name of the table + name: Unique name of the table """ def __init__(self, name: str) -> None: @@ -255,8 +255,8 @@ class Database(WriteableCogniteResourceWithClientRef["DatabaseWrite"]): """A NoSQL database to store customer data. Args: - name (str): Unique name of a database. - created_time (int | None): Time the database was created. + name: Unique name of a database. + created_time: Time the database was created. """ def __init__( @@ -281,10 +281,10 @@ async def tables_async(self, limit: int | None = None) -> TableList: """Get the tables in this database. Args: - limit (int | None): The number of tables to return. + limit: The number of tables to return. Returns: - TableList: List of tables in this database. + List of tables in this database. """ if self.name is None: raise ValueError("Unable to list tables, 'name' is not set on instance") @@ -299,7 +299,7 @@ class DatabaseWrite(WriteableCogniteResource["DatabaseWrite"]): """A NoSQL database to store customer data. Args: - name (str): Unique name of a database. + name: Unique name of a database. """ def __init__(self, name: str) -> None: diff --git a/cognite/client/data_classes/relationships.py b/cognite/client/data_classes/relationships.py index b60607929f..8615d378ed 100644 --- a/cognite/client/data_classes/relationships.py +++ b/cognite/client/data_classes/relationships.py @@ -37,16 +37,16 @@ class RelationshipCore(WriteableCogniteResource["RelationshipWrite"], ABC): """Representation of a relationship in CDF, consists of a source and a target and some additional parameters. Args: - external_id (str): External id of the relationship, must be unique within the project. - source_external_id (str): External id of the CDF resource that constitutes the relationship source. - source_type (str): The CDF resource type of the relationship source. Must be one of the specified values. - target_external_id (str): External id of the CDF resource that constitutes the relationship target. 
- target_type (str): The CDF resource type of the relationship target. Must be one of the specified values. - start_time (int | None): Time, in milliseconds since Jan. 1, 1970, when the relationship became active. If there is no startTime, relationship is active from the beginning of time until endTime. - end_time (int | None): Time, in milliseconds since Jan. 1, 1970, when the relationship became inactive. If there is no endTime, relationship is active from startTime until the present or any point in the future. If endTime and startTime are set, then endTime must be strictly greater than startTime. - confidence (float | None): Confidence value of the existence of this relationship. Generated relationships should provide a realistic score on the likelihood of the existence of the relationship. Relationships without a confidence value can be interpreted at the discretion of each project. - data_set_id (int | None): The id of the dataset this relationship belongs to. - labels (list[Label] | None): A list of the labels associated with this resource item. + external_id: External id of the relationship, must be unique within the project. + source_external_id: External id of the CDF resource that constitutes the relationship source. + source_type: The CDF resource type of the relationship source. Must be one of the specified values. + target_external_id: External id of the CDF resource that constitutes the relationship target. + target_type: The CDF resource type of the relationship target. Must be one of the specified values. + start_time: Time, in milliseconds since Jan. 1, 1970, when the relationship became active. If there is no startTime, relationship is active from the beginning of time until endTime. + end_time: Time, in milliseconds since Jan. 1, 1970, when the relationship became inactive. If there is no endTime, relationship is active from startTime until the present or any point in the future. 
If endTime and startTime are set, then endTime must be strictly greater than startTime. + confidence: Confidence value of the existence of this relationship. Generated relationships should provide a realistic score on the likelihood of the existence of the relationship. Relationships without a confidence value can be interpreted at the discretion of each project. + data_set_id: The id of the dataset this relationship belongs to. + labels: A list of the labels associated with this resource item. """ _RESOURCE_TYPE_MAP: typing.ClassVar[dict[str, type[Asset | TimeSeries | FileMetadata | Event | Sequence]]] = { @@ -103,20 +103,20 @@ class Relationship(RelationshipCore): This is the read version of the relationship class, it is used when retrieving from CDF. Args: - external_id (str): External id of the relationship, must be unique within the project. - created_time (int): Time, in milliseconds since Jan. 1, 1970, when this relationship was created in CDF. - last_updated_time (int): Time, in milliseconds since Jan. 1, 1970, when this relationship was last updated in CDF. - source_external_id (str): External id of the CDF resource that constitutes the relationship source. - source_type (str): The CDF resource type of the relationship source. Must be one of the specified values. - source (Asset | TimeSeries | FileMetadata | Sequence | Event | dict[str, Any] | None): The full resource referenced by the source_external_id and source_type fields. - target_external_id (str): External id of the CDF resource that constitutes the relationship target. - target_type (str): The CDF resource type of the relationship target. Must be one of the specified values. - target (Asset | TimeSeries | FileMetadata | Sequence | Event | dict[str, Any] | None): The full resource referenced by the target_external_id and target_type fields. - start_time (int | None): Time, in milliseconds since Jan. 1, 1970, when the relationship became active. 
If there is no startTime, relationship is active from the beginning of time until endTime. - end_time (int | None): Time, in milliseconds since Jan. 1, 1970, when the relationship became inactive. If there is no endTime, relationship is active from startTime until the present or any point in the future. If endTime and startTime are set, then endTime must be strictly greater than startTime. - confidence (float | None): Confidence value of the existence of this relationship. Generated relationships should provide a realistic score on the likelihood of the existence of the relationship. Relationships without a confidence value can be interpreted at the discretion of each project. - data_set_id (int | None): The id of the dataset this relationship belongs to. - labels (SequenceNotStr[Label | str | LabelDefinition | dict] | None): A list of the labels associated with this resource item. + external_id: External id of the relationship, must be unique within the project. + created_time: Time, in milliseconds since Jan. 1, 1970, when this relationship was created in CDF. + last_updated_time: Time, in milliseconds since Jan. 1, 1970, when this relationship was last updated in CDF. + source_external_id: External id of the CDF resource that constitutes the relationship source. + source_type: The CDF resource type of the relationship source. Must be one of the specified values. + source: The full resource referenced by the source_external_id and source_type fields. + target_external_id: External id of the CDF resource that constitutes the relationship target. + target_type: The CDF resource type of the relationship target. Must be one of the specified values. + target: The full resource referenced by the target_external_id and target_type fields. + start_time: Time, in milliseconds since Jan. 1, 1970, when the relationship became active. If there is no startTime, relationship is active from the beginning of time until endTime. + end_time: Time, in milliseconds since Jan. 
1, 1970, when the relationship became inactive. If there is no endTime, relationship is active from startTime until the present or any point in the future. If endTime and startTime are set, then endTime must be strictly greater than startTime. + confidence: Confidence value of the existence of this relationship. Generated relationships should provide a realistic score on the likelihood of the existence of the relationship. Relationships without a confidence value can be interpreted at the discretion of each project. + data_set_id: The id of the dataset this relationship belongs to. + labels: A list of the labels associated with this resource item. """ def __init__( @@ -243,16 +243,16 @@ class RelationshipWrite(RelationshipCore): This is the write version of the relationship class, and is used when creating new relationships. Args: - external_id (str): External id of the relationship, must be unique within the project. - source_external_id (str): External id of the CDF resource that constitutes the relationship source. - source_type (RelationshipType): The CDF resource type of the relationship source. Must be one of the specified values. - target_external_id (str): External id of the CDF resource that constitutes the relationship target. - target_type (RelationshipType): The CDF resource type of the relationship target. Must be one of the specified values. - start_time (int | None): Time, in milliseconds since Jan. 1, 1970, when the relationship became active. If there is no startTime, relationship is active from the beginning of time until endTime. - end_time (int | None): Time, in milliseconds since Jan. 1, 1970, when the relationship became inactive. If there is no endTime, relationship is active from startTime until the present or any point in the future. If endTime and startTime are set, then endTime must be strictly greater than startTime. - confidence (float | None): Confidence value of the existence of this relationship. 
Generated relationships should provide a realistic score on the likelihood of the existence of the relationship. Relationships without a confidence value can be interpreted at the discretion of each project. - data_set_id (int | None): The id of the dataset this relationship belongs to. - labels (SequenceNotStr[Label | str | LabelDefinitionWrite | dict] | None): A list of the labels associated with this resource item. + external_id: External id of the relationship, must be unique within the project. + source_external_id: External id of the CDF resource that constitutes the relationship source. + source_type: The CDF resource type of the relationship source. Must be one of the specified values. + target_external_id: External id of the CDF resource that constitutes the relationship target. + target_type: The CDF resource type of the relationship target. Must be one of the specified values. + start_time: Time, in milliseconds since Jan. 1, 1970, when the relationship became active. If there is no startTime, relationship is active from the beginning of time until endTime. + end_time: Time, in milliseconds since Jan. 1, 1970, when the relationship became inactive. If there is no endTime, relationship is active from startTime until the present or any point in the future. If endTime and startTime are set, then endTime must be strictly greater than startTime. + confidence: Confidence value of the existence of this relationship. Generated relationships should provide a realistic score on the likelihood of the existence of the relationship. Relationships without a confidence value can be interpreted at the discretion of each project. + data_set_id: The id of the dataset this relationship belongs to. + labels: A list of the labels associated with this resource item. """ def __init__( @@ -306,18 +306,18 @@ class RelationshipFilter(CogniteFilter): """Filter on relationships with exact match. Multiple filter elements in one property, e.g. 
`sourceExternalIds: [ "a", "b" ]`, will return all relationships where the `sourceExternalId` field is either `a` or `b`. Filters in multiple properties will return the relationships that match all criteria. If the filter is not specified it default to an empty filter. Args: - source_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their `sourceExternalId` field - source_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their `sourceType` field - target_external_ids (SequenceNotStr[str] | None): Include relationships that have any of these values in their `targetExternalId` field - target_types (SequenceNotStr[str] | None): Include relationships that have any of these values in their `targetType` field - data_set_ids (typing.Sequence[dict[str, Any]] | None): Either one of `internalId` (int) or `externalId` (str) - start_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) - end_time (dict[str, int] | None): Range between two timestamps, minimum and maximum milliseconds (inclusive) - confidence (dict[str, int] | None): Range to filter the field for (inclusive). - last_updated_time (dict[str, int] | None): Range to filter the field for (inclusive). - created_time (dict[str, int] | None): Range to filter the field for (inclusive). - active_at_time (dict[str, int] | None): Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime is will be regarded as active until the end of time. 
Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. - labels (LabelFilter | None): Return only the resource matching the specified label constraints. + source_external_ids: Include relationships that have any of these values in their `sourceExternalId` field + source_types: Include relationships that have any of these values in their `sourceType` field + target_external_ids: Include relationships that have any of these values in their `targetExternalId` field + target_types: Include relationships that have any of these values in their `targetType` field + data_set_ids: Either one of `internalId` (int) or `externalId` (str) + start_time: Range between two timestamps, minimum and maximum milliseconds (inclusive) + end_time: Range between two timestamps, minimum and maximum milliseconds (inclusive) + confidence: Range to filter the field for (inclusive). + last_updated_time: Range to filter the field for (inclusive). + created_time: Range to filter the field for (inclusive). + active_at_time: Limits results to those active at any point within the given time range, i.e. if there is any overlap in the intervals [activeAtTime.min, activeAtTime.max] and [startTime, endTime], where both intervals are inclusive. If a relationship does not have a startTime, it is regarded as active from the beginning of time by this filter. If it does not have an endTime it will be regarded as active until the end of time. Similarly, if a min is not supplied to the filter, the min will be implicitly set to the beginning of time, and if a max is not supplied, the max will be implicitly set to the end of time. + labels: Return only the resource matching the specified label constraints. 
""" def __init__( @@ -359,7 +359,7 @@ class RelationshipUpdate(CogniteUpdate): """Update applied to a single relationship Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. + external_id: The external ID provided by the client. Must be unique for the resource type. """ diff --git a/cognite/client/data_classes/sequences.py b/cognite/client/data_classes/sequences.py index 49113b57ba..858c85ee37 100644 --- a/cognite/client/data_classes/sequences.py +++ b/cognite/client/data_classes/sequences.py @@ -45,11 +45,11 @@ class SequenceColumnCore(WriteableCogniteResource["SequenceColumnWrite"], ABC): """This represents a column in a sequence. Args: - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - name (str | None): Name of the column - description (str | None): Description of the column - value_type (ValueType): The type of the column. It can be String, Double or Long. - metadata (dict[str, Any] | None): Custom, application-specific metadata. String key -> String value. The maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: Name of the column + description: Description of the column + value_type: The type of the column. It can be String, Double or Long. + metadata: Custom, application-specific metadata. String key -> String value. The maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. """ def __init__( @@ -71,13 +71,13 @@ class SequenceColumn(SequenceColumnCore): """This represents a column in a sequence. It is used for reading only. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - created_time (int | None): Time when this column was created in CDF in milliseconds since Jan 1, 1970. 
- last_updated_time (int | None): The last time this column was updated in CDF, in milliseconds since Jan 1, 1970. - name (str | None): Name of the column - description (str | None): Description of the column - value_type (ValueType): The type of the column. It can be String, Double or Long. - metadata (dict[str, Any] | None): Custom, application-specific metadata. String key -> String value. Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + external_id: The external ID provided by the client. Must be unique for the resource type. + created_time: Time when this column was created in CDF in milliseconds since Jan 1, 1970. + last_updated_time: The last time this column was updated in CDF, in milliseconds since Jan 1, 1970. + name: Name of the column + description: Description of the column + value_type: The type of the column. It can be String, Double or Long. + metadata: Custom, application-specific metadata. String key -> String value. Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. """ def __init__( @@ -130,11 +130,11 @@ class SequenceColumnWrite(SequenceColumnCore): """This represents a column in a sequence. This is used for writing only. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str | None): Name of the column - description (str | None): Description of the column - value_type (ValueType): The type of the column. It can be String, Double or Long. - metadata (dict[str, Any] | None): Custom, application-specific metadata. String key -> String value. The maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: Name of the column + description: Description of the column + value_type: The type of the column. It can be String, Double or Long. + metadata: Custom, application-specific metadata. String key -> String value. 
The maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. """ def __init__( @@ -176,7 +176,7 @@ def value_types(self) -> list[ValueType]: """Retrieves list of column value types Returns: - list[ValueType]: List of column value types + List of column value types """ return [c.value_type for c in self] @@ -193,7 +193,7 @@ def value_types(self) -> list[ValueType]: """Retrieves list of column value types Returns: - list[ValueType]: List of column value types + List of column value types """ return [c.value_type for c in self] @@ -203,16 +203,16 @@ class Sequence(WriteableCogniteResourceWithClientRef["SequenceWrite"]): This is the read version of the class, it is used for retrieving data from the CDF. Args: - id (int): Unique cognite-provided identifier for the sequence - created_time (int): Time when this sequence was created in CDF in milliseconds since Jan 1, 1970. - last_updated_time (int): The last time this sequence was updated in CDF, in milliseconds since Jan 1, 1970. - name (str | None): Name of the sequence - description (str | None): Description of the sequence - asset_id (int | None): Optional asset this sequence is associated with - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - metadata (dict[str, Any] | None): Custom, application-specific metadata. String key -> String value. The maximum length of the key is 32 bytes, the value 512 bytes, with up to 16 key-value pairs. - columns (typing.Sequence[SequenceColumn]): List of column definitions - data_set_id (int | None): Data set that this sequence belongs to + id: Unique cognite-provided identifier for the sequence + created_time: Time when this sequence was created in CDF in milliseconds since Jan 1, 1970. + last_updated_time: The last time this sequence was updated in CDF, in milliseconds since Jan 1, 1970. 
+ name: Name of the sequence + description: Description of the sequence + asset_id: Optional asset this sequence is associated with + external_id: The external ID provided by the client. Must be unique for the resource type. + metadata: Custom, application-specific metadata. String key -> String value. The maximum length of the key is 32 bytes, the value 512 bytes, with up to 16 key-value pairs. + columns: List of column definitions + data_set_id: Data set that this sequence belongs to """ def __init__( @@ -294,11 +294,11 @@ async def rows_async(self, start: int, end: int | None) -> SequenceRows: """Retrieves rows from this sequence. Args: - start (int): Row number to start from (inclusive). - end (int | None): Upper limit on the row number (exclusive). Set to None or -1 to get all rows until end of sequence. + start: Row number to start from (inclusive). + end: Upper limit on the row number (exclusive). Set to None or -1 to get all rows until end of sequence. Returns: - SequenceRows: List of sequence data. + List of sequence data. """ if self.external_id is not None: return await self._cognite_client.sequences.data.retrieve( @@ -317,7 +317,7 @@ def column_external_ids(self) -> list[str]: """Retrieves list of column external ids for the sequence, for use in e.g. data retrieve or insert methods Returns: - list[str]: List of sequence column external ids + List of sequence column external ids """ assert self.columns is not None return self.columns.as_external_ids() @@ -327,7 +327,7 @@ def column_value_types(self) -> list[ValueType]: """Retrieves list of column value types Returns: - list[ValueType]: List of column value types + List of column value types """ assert self.columns is not None return self.columns.value_types @@ -338,13 +338,13 @@ class SequenceWrite(WriteableCogniteResource["SequenceWrite"]): This is the write version of the class, it is used for inserting data into the CDF. 
Args: - columns (typing.Sequence[SequenceColumnWrite]): List of column definitions - name (str | None): Name of the sequence - description (str | None): Description of the sequence - asset_id (int | None): Optional asset this sequence is associated with - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - metadata (dict[str, Any] | None): Custom, application-specific metadata. String key -> String value. Th maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. - data_set_id (int | None): Data set that this sequence belongs to + columns: List of column definitions + name: Name of the sequence + description: Description of the sequence + asset_id: Optional asset this sequence is associated with + external_id: The external ID provided by the client. Must be unique for the resource type. + metadata: Custom, application-specific metadata. String key -> String value. The maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + data_set_id: Data set that this sequence belongs to """ def __init__( @@ -396,14 +396,14 @@ class SequenceFilter(CogniteFilter): """No description. Args: - name (str | None): Return only sequences with this *exact* name. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - metadata (dict[str, Any] | None): Filter the sequences by metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. - asset_ids (typing.Sequence[int] | None): Return only sequences linked to one of the specified assets. - asset_subtree_ids (typing.Sequence[dict[str, Any]] | None): Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. 
- last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - data_set_ids (typing.Sequence[dict[str, Any]] | None): Only include sequences that belong to these datasets. + name: Return only sequences with this *exact* name. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + metadata: Filter the sequences by metadata fields and values (case-sensitive). Format is {"key1":"value1","key2":"value2"}. + asset_ids: Return only sequences linked to one of the specified assets. + asset_subtree_ids: Only include sequences that have a related asset in a subtree rooted at any of these assetIds. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + created_time: Range between two timestamps. + last_updated_time: Range between two timestamps. + data_set_ids: Only include sequences that belong to these datasets. """ def __init__( @@ -474,8 +474,8 @@ class SequenceUpdate(CogniteUpdate): """No description. Args: - id (int): A server-generated ID for the object. - external_id (str): The external ID provided by the client. Must be unique for the resource type. + id: A server-generated ID for the object. + external_id: The external ID provided by the client. Must be unique for the resource type. """ class _PrimitiveSequenceUpdate(CognitePrimitiveUpdate): @@ -593,8 +593,8 @@ class SequenceRow(CogniteResource): """This class represents a row in a sequence. It is used for both read and write. Args: - row_number (int): The row number for this row. - values (typing.Sequence[RowValues]): List of values in the order defined in the columns field. Number of items must match. Null is accepted for missing values. String values must be no longer than 256 characters. + row_number: The row number for this row. + values: List of values in the order defined in the columns field. Number of items must match. Null is accepted for missing values. String values must be no longer than 256 characters. 
""" @@ -625,10 +625,10 @@ class SequenceRows(CogniteResource): """An object representing a list of rows from a sequence. Args: - rows (typing.Sequence[SequenceRow]): The sequence rows. - columns (SequenceColumnList): The column information. - id (int | None): Identifier of the sequence the data belong to - external_id (str | None): External id of the sequence the data belong to + rows: The sequence rows. + columns: The column information. + id: Identifier of the sequence the data belong to + external_id: External id of the sequence the data belong to """ def __init__( @@ -675,10 +675,10 @@ def get_column(self, external_id: str) -> list[RowValues]: """Get a column by external_id. Args: - external_id (str): External id of the column. + external_id: External id of the column. Returns: - list[RowValues]: A list of values for that column in the sequence + A list of values for that column in the sequence """ try: ix = self.column_external_ids.index(external_id) @@ -702,10 +702,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the sequence data into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representing the instance. + A dictionary representing the instance. """ key = "rowNumber" if camel_case else "row_number" dumped: dict[str, Any] = { @@ -731,10 +731,10 @@ def to_pandas(self, column_names: ColumnNames = "columnExternalId") -> pandas.Da """Convert the sequence data into a pandas DataFrame. Args: - column_names (ColumnNames): Which field(s) to use as column header. Can use "externalId", "id", "columnExternalId", "id|columnExternalId" or "externalId|columnExternalId". + column_names: Which field(s) to use as column header. Can use "externalId", "id", "columnExternalId", "id|columnExternalId" or "externalId|columnExternalId". Returns: - pandas.DataFrame: The dataframe. 
+ The dataframe. """ pd = local_import("pandas") @@ -770,7 +770,7 @@ def column_external_ids(self) -> list[str]: """Retrieves list of column external ids for the sequence, for use in e.g. data retrieve or insert methods. Returns: - list[str]: List of sequence column external ids. + List of sequence column external ids. """ assert self.columns is not None return self.columns.as_external_ids() @@ -780,7 +780,7 @@ def column_value_types(self) -> list[ValueType]: """Retrieves list of column value types. Returns: - list[ValueType]: List of column value types + List of column value types """ assert self.columns is not None return self.columns.value_types @@ -825,12 +825,12 @@ def to_pandas( """Convert the sequence data list into a pandas DataFrame. Each column will be a sequence. Args: - key (Literal['id', 'external_id']): If concat = False, this decides which field to use as key in the dictionary. Defaults to "external_id". - column_names (ColumnNames): Which field to use as column header. Can use any combination of "externalId", "columnExternalId", "id" and other characters as a template. - concat (bool): Whether to concatenate the sequences into a single DataFrame or return a dictionary of DataFrames. Defaults to False. + key: If concat = False, this decides which field to use as key in the dictionary. Defaults to "external_id". + column_names: Which field to use as column header. Can use any combination of "externalId", "columnExternalId", "id" and other characters as a template. + concat: Whether to concatenate the sequences into a single DataFrame or return a dictionary of DataFrames. Defaults to False. Returns: - pandas.DataFrame | dict[str, pandas.DataFrame] | dict[int, pandas.DataFrame]: The sequence data list as a pandas DataFrame. + The sequence data list as a pandas DataFrame. 
""" pd = local_import("pandas") if concat: diff --git a/cognite/client/data_classes/shared.py b/cognite/client/data_classes/shared.py index c3de2032a4..ce520cf4e7 100644 --- a/cognite/client/data_classes/shared.py +++ b/cognite/client/data_classes/shared.py @@ -12,9 +12,9 @@ class TimestampRange(CogniteResource): """Range between two timestamps. Args: - max (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - min (int | None): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - **_ (Any): No description. + max: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + min: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + **_: No description. """ def __init__(self, max: int | None = None, min: int | None = None, **_: Any) -> None: @@ -30,7 +30,7 @@ class AggregateResult(CogniteResource): """Aggregation group Args: - count (int): Size of the aggregation group + count: Size of the aggregation group """ def __init__(self, count: int) -> None: @@ -45,8 +45,8 @@ class AggregateUniqueValuesResult(AggregateResult): """Aggregation group Args: - count (int): Size of the aggregation group - value (int | str | None): A unique value from the requested field + count: Size of the aggregation group + value: A unique value from the requested field """ def __init__(self, count: int, value: int | str | None) -> None: @@ -62,9 +62,9 @@ class Geometry(CogniteResource): """Represents the points, curves and surfaces in the coordinate space. Args: - type (Literal['Point', 'MultiPoint', 'LineString', 'MultiLineString', 'Polygon', 'MultiPolygon']): The geometry type. - coordinates (list): An array of the coordinates of the geometry. 
The structure of the elements in this array is determined by the type of geometry. - geometries (Collection[Geometry] | None): No description. + type: The geometry type. + coordinates: An array of the coordinates of the geometry. The structure of the elements in this array is determined by the type of geometry. + geometries: No description. Examples: Point: @@ -139,8 +139,8 @@ class GeometryFilter(CogniteFilter): """Represents the points, curves and surfaces in the coordinate space. Args: - type (Literal['Point', 'MultiPoint', 'LineString', 'MultiLineString', 'Polygon', 'MultiPolygon']): The geometry type. - coordinates (Sequence[float] | Sequence[Sequence[float]] | Sequence[Sequence[Sequence[float]]] | Sequence[Sequence[Sequence[Sequence[float]]]]): An array of the coordinates of the geometry. The structure of the elements in this array is determined by the type of geometry. + type: The geometry type. + coordinates: An array of the coordinates of the geometry. The structure of the elements in this array is determined by the type of geometry. Point: Coordinates of a point in 2D space, described as an array of 2 numbers. @@ -214,9 +214,9 @@ class GeoLocation(CogniteResource): """A GeoLocation object conforming to the GeoJSON spec. Args: - type (Literal['Feature']): The GeoJSON type. Currently only 'Feature' is supported. - geometry (Geometry): The geometry. One of 'Point', 'MultiPoint, 'LineString', 'MultiLineString', 'Polygon', or 'MultiPolygon'. - properties (dict | None): Optional additional properties in a String key -> Object value format. + type: The GeoJSON type. Currently only 'Feature' is supported. + geometry: The geometry. One of 'Point', 'MultiPoint', 'LineString', 'MultiLineString', 'Polygon', or 'MultiPolygon'. + properties: Optional additional properties in a String key -> Object value format. 
""" _VALID_TYPES = frozenset({"Feature"}) @@ -250,8 +250,8 @@ class GeoLocationFilter(CogniteResource): """Return only the resource matching the specified geographic relation. Args: - relation (str): One of the following supported queries: INTERSECTS, DISJOINT, WITHIN. - shape (GeometryFilter): Represents the points, curves and surfaces in the coordinate space. + relation: One of the following supported queries: INTERSECTS, DISJOINT, WITHIN. + shape: Represents the points, curves and surfaces in the coordinate space. """ def __init__(self, relation: str, shape: GeometryFilter) -> None: diff --git a/cognite/client/data_classes/simulators/logs.py b/cognite/client/data_classes/simulators/logs.py index 46bd6e15a6..ea5d0179c2 100644 --- a/cognite/client/data_classes/simulators/logs.py +++ b/cognite/client/data_classes/simulators/logs.py @@ -21,9 +21,9 @@ class SimulatorLogData(CogniteResource): Simulator log data represents a single log entry in a simulator log. Args: - timestamp (int): Timestamp of the log message. - message (str): Log message. - severity (Severity): Log severity level. + timestamp: Timestamp of the log message. + message: Log message. + severity: Log severity level. """ timestamp: int @@ -48,12 +48,12 @@ class SimulatorLog(CogniteResource): They help users identify issues, diagnose problems, and gain insights into the behavior of the simulator integrations. Args: - id (int): A unique id of a simulator resource log. - data (Sequence[SimulatorLogData]): Log data of the simulator resource. - created_time (int): The number of milliseconds since epoch. - last_updated_time (int): The number of milliseconds since epoch. - data_set_id (int): Dataset id of the resource. - severity (Severity | None): Minimum severity level of the log data. This overrides connector configuration minimum severity level and can be used for more granular control. + id: A unique id of a simulator resource log. + data: Log data of the simulator resource. 
+ created_time: The number of milliseconds since epoch. + last_updated_time: The number of milliseconds since epoch. + data_set_id: Dataset id of the resource. + severity: Minimum severity level of the log data. This overrides connector configuration minimum severity level and can be used for more granular control. """ def __init__( diff --git a/cognite/client/data_classes/simulators/models.py b/cognite/client/data_classes/simulators/models.py index 21fc2c5149..42b918536c 100644 --- a/cognite/client/data_classes/simulators/models.py +++ b/cognite/client/data_classes/simulators/models.py @@ -67,21 +67,21 @@ class SimulatorModelRevision(WriteableCogniteResourceWithClientRef["SimulatorMod Each revision ensures that modifications to models are traceable and allows users to understand the evolution of a given model. Args: - id (int): Internal id of the simulator model revision - external_id (str): External id of the simulator model revision - model_external_id (str): External id of the associated simulator model - file_id (int): The id of the file associated with the simulator model revision - created_time (int): The time when the simulator model revision was created - last_updated_time (int): The time when the simulator model revision was last updated - simulator_external_id (str): External id of the simulator associated with the simulator model revision - data_set_id (int): The id of the dataset associated with the simulator model revision - created_by_user_id (str): The id of the user who created the simulator model revision - status (str): The status of the simulator model revision - version_number (int): The version number of the simulator model revision - log_id (int): The id of the log associated with the simulator model revision - description (str | None): The description of the simulator model revision - status_message (str | None): The current status message of the simulator model revision - external_dependencies (list[SimulatorModelRevisionDependency] | 
None): A list of external dependencies for the simulator model revision + id: Internal id of the simulator model revision + external_id: External id of the simulator model revision + model_external_id: External id of the associated simulator model + file_id: The id of the file associated with the simulator model revision + created_time: The time when the simulator model revision was created + last_updated_time: The time when the simulator model revision was last updated + simulator_external_id: External id of the simulator associated with the simulator model revision + data_set_id: The id of the dataset associated with the simulator model revision + created_by_user_id: The id of the user who created the simulator model revision + status: The status of the simulator model revision + version_number: The version number of the simulator model revision + log_id: The id of the log associated with the simulator model revision + description: The description of the simulator model revision + status_message: The current status message of the simulator model revision + external_dependencies: A list of external dependencies for the simulator model revision """ def __init__( @@ -160,7 +160,7 @@ async def get_data_async(self) -> SimulatorModelRevisionData | None: """`Retrieve data associated with this simulator model revision. `_ Returns: - SimulatorModelRevisionData | None: Data for the simulator model revision. + Data for the simulator model revision. """ data = await self._cognite_client.simulators.models.revisions.retrieve_data( model_revision_external_id=self.external_id @@ -188,12 +188,12 @@ class SimulatorModelCore(WriteableCogniteResource["SimulatorModelWrite"], ABC): This is the read/response format of a simulator model. 
Args: - external_id (str): External id of the simulator model - simulator_external_id (str): External id of the associated simulator - data_set_id (int): The id of the dataset associated with the simulator model - name (str): The name of the simulator model - type (str): The type key of the simulator model - description (str | None): The description of the simulator model + external_id: External id of the simulator model + simulator_external_id: External id of the associated simulator + data_set_id: The id of the dataset associated with the simulator model + name: The name of the simulator model + type: The type key of the simulator model + description: The description of the simulator model """ def __init__( @@ -243,15 +243,15 @@ class SimulatorModel(SimulatorModelCore): This is the read/response format of a simulator model. Args: - id (int): A unique id of a simulator model - external_id (str): External id of the simulator model - simulator_external_id (str): External id of the associated simulator - data_set_id (int): The id of the dataset associated with the simulator model - name (str): The name of the simulator model - type (str): The type key of the simulator model - created_time (int): The time when the simulator model was created - last_updated_time (int): The time when the simulator model was last updated - description (str | None): The description of the simulator model + id: A unique id of a simulator model + external_id: External id of the simulator model + simulator_external_id: External id of the associated simulator + data_set_id: The id of the dataset associated with the simulator model + name: The name of the simulator model + type: The type key of the simulator model + created_time: The time when the simulator model was created + last_updated_time: The time when the simulator model was last updated + description: The description of the simulator model """ def __init__( @@ -368,8 +368,8 @@ class SimulatorModelRevisionDependency(CogniteResource): 
""" Represents an external dependency for a simulator model revision. Args: - file (int): The file ID associated with the external dependency. - arguments (dict[str, str]): A dictionary that contains the key-value pairs (fields) for the external dependency. + file: The file ID associated with the external dependency. + arguments: A dictionary that contains the key-value pairs (fields) for the external dependency. """ file: SimulatorModelDependencyFileReference @@ -597,13 +597,13 @@ class SimulatorModelRevisionData(CogniteResource): - Fully implement it with comprehensive model details Args: - model_revision_external_id (str): External id of the associated model revision - created_time (int): The time when the simulator model revision data was created - last_updated_time (int): The time when the simulator model revision data was last updated - data_set_id (int): The id of the dataset associated with the simulator model revision data - flowsheets (list[SimulatorFlowsheet] | None): Extracted flowsheet information, + model_revision_external_id: External id of the associated model revision + created_time: The time when the simulator model revision data was created + last_updated_time: The time when the simulator model revision data was last updated + data_set_id: The id of the dataset associated with the simulator model revision data + flowsheets: Extracted flowsheet information, if supported by the connector. 
May include blocks, equipment, properties, and connections - info (dict[str, str] | None): Additional metadata extracted from the simulator file, + info: Additional metadata extracted from the simulator file, if supported by the connector """ diff --git a/cognite/client/data_classes/simulators/routine_revisions.py b/cognite/client/data_classes/simulators/routine_revisions.py index 8c010fe652..fbf3cd4a5b 100644 --- a/cognite/client/data_classes/simulators/routine_revisions.py +++ b/cognite/client/data_classes/simulators/routine_revisions.py @@ -33,8 +33,8 @@ class SimulationValueUnitInput(CogniteResource): The unit of the simulation value. Args: - name (str): The name of the unit. - quantity (str | None): The quantity of the unit. + name: The name of the unit. + quantity: The quantity of the unit. """ name: str @@ -54,10 +54,10 @@ class SimulatorRoutineInput(CogniteResource, ABC): The input of the simulator routine revision. Args: - name (str): The name of the input. - reference_id (str): The reference ID of the input. - save_timeseries_external_id (str | None): The external ID of the timeseries to save the input. If not provided, the input is not saved to a timeseries. - unit (SimulationValueUnitInput | None): The unit of the input. + name: The name of the input. + reference_id: The reference ID of the input. + save_timeseries_external_id: The external ID of the timeseries to save the input. If not provided, the input is not saved to a timeseries. + unit: The unit of the input. """ _type: ClassVar[str] @@ -100,12 +100,12 @@ class SimulatorRoutineInputTimeseries(SimulatorRoutineInput): The timeseries input of the simulator routine revision. Args: - name (str): The name of the input. - reference_id (str): The reference ID of the input. - source_external_id (str): The external ID of the source timeseries. - aggregate (Literal["average", "interpolation", "stepInterpolation"] | None): The aggregation method to use for the timeseries. 
- save_timeseries_external_id (str | None): The external ID of the timeseries to save the input. If not provided, the input is not saved to a timeseries. - unit (SimulationValueUnitInput | None): The unit of the input. + name: The name of the input. + reference_id: The reference ID of the input. + source_external_id: The external ID of the source timeseries. + aggregate: The aggregation method to use for the timeseries. + save_timeseries_external_id: The external ID of the timeseries to save the input. If not provided, the input is not saved to a timeseries. + unit: The unit of the input. """ _type = "timeseries" @@ -141,12 +141,12 @@ class SimulatorRoutineInputConstant(SimulatorRoutineInput): The constant input of the simulator routine revision. Args: - name (str): The name of the input. - reference_id (str): The reference ID of the input. - value (str | int | float | list[str] | list[int] | list[float]): The value of the input. - value_type (Literal["STRING", "DOUBLE", "STRING_ARRAY", "DOUBLE_ARRAY"]): The value type of the input. - unit (SimulationValueUnitInput | None): The unit of the input. - save_timeseries_external_id (str | None): The external ID of the timeseries to save the input. If not provided, the input is not saved to a timeseries. + name: The name of the input. + reference_id: The reference ID of the input. + value: The value of the input. + value_type: The value type of the input. + unit: The unit of the input. + save_timeseries_external_id: The external ID of the timeseries to save the input. If not provided, the input is not saved to a timeseries. """ _type = "constant" @@ -182,11 +182,11 @@ class SimulatorRoutineOutput(CogniteResource): The output of the simulator routine revision. Args: - name (str): The name of the output. - reference_id (str): The reference ID of the output. - value_type (str): The value type of the output. - unit (SimulationValueUnitInput | None): The unit of the output. 
- save_timeseries_external_id (str | None): The external ID of the timeseries to save the output. If not provided, the output is not saved to a timeseries. + name: The name of the output. + reference_id: The reference ID of the output. + value_type: The value type of the output. + unit: The unit of the output. + save_timeseries_external_id: The external ID of the timeseries to save the output. If not provided, the output is not saved to a timeseries. """ name: str @@ -222,7 +222,7 @@ class SimulatorRoutineSchedule(CogniteResource): The schedule configuration of the simulator routine revision. Args: - cron_expression (str): The cron expression of the schedule. + cron_expression: The cron expression of the schedule. """ cron_expression: str @@ -240,9 +240,9 @@ class SimulatorRoutineDataSampling(CogniteResource): Learn more about data sampling . Args: - sampling_window (int): Sampling window of the data sampling. Represented in minutes - granularity (int): The granularity of the data sampling in minutes. - validation_window (int | None): Validation window of the data sampling. Represented in minutes. Used when either logical check or steady state detection is enabled. + sampling_window: Sampling window of the data sampling. Represented in minutes + granularity: The granularity of the data sampling in minutes. + validation_window: Validation window of the data sampling. Represented in minutes. Used when either logical check or steady state detection is enabled. """ sampling_window: int @@ -266,10 +266,10 @@ class SimulatorRoutineLogicalCheck(CogniteResource): Learn more about logical checks . Args: - aggregate (Literal["average", "interpolation", "stepInterpolation"]): The aggregation method to use for the time series. - operator (Literal["eq", "ne", "gt", "ge", "lt", "le"]): The operator to use for the logical check. - value (float): The value to use for the logical check. - timeseries_external_id (str | None): The external ID of the time series to check. 
+ aggregate: The aggregation method to use for the time series. + operator: The operator to use for the logical check. + value: The value to use for the logical check. + timeseries_external_id: The external ID of the time series to check. """ aggregate: Literal["average", "interpolation", "stepInterpolation"] @@ -295,11 +295,11 @@ class SimulatorRoutineSteadyStateDetection(CogniteResource): Learn more about steady state detection . Args: - aggregate (Literal["average", "interpolation", "stepInterpolation"]): The aggregation method to use for the time series. - min_section_size (int): The minimum number of consecutive data points that must meet the steady state criteria. - var_threshold (float): The maximum variance allowed for the steady state region. - slope_threshold (float): The maximum slope allowed for the steady state region. - timeseries_external_id (str | None): The external ID of the time series to check. + aggregate: The aggregation method to use for the time series. + min_section_size: The minimum number of consecutive data points that must meet the steady state criteria. + var_threshold: The maximum variance allowed for the steady state region. + slope_threshold: The maximum slope allowed for the steady state region. + timeseries_external_id: The external ID of the time series to check. """ aggregate: Literal["average", "interpolation", "stepInterpolation"] @@ -327,12 +327,12 @@ class SimulatorRoutineConfiguration(CogniteResource): Learn more about simulator routine configuration . Args: - inputs (SimulatorRoutineInputList | Sequence[SimulatorRoutineInput] | None): The inputs of the simulator routine revision. Each element can be either a constant or a timeseries input. - outputs (SimulatorRoutineOutputList | Sequence[SimulatorRoutineOutput] | None): The outputs of the simulator routine revision. - logical_check (Sequence[SimulatorRoutineLogicalCheck] | None): Logical check configuration. 
- steady_state_detection (Sequence[SimulatorRoutineSteadyStateDetection] | None): Steady state detection configuration. - schedule (SimulatorRoutineSchedule | None): Schedule configuration. - data_sampling (SimulatorRoutineDataSampling | None): Data sampling configuration. Learn more about data sampling . + inputs: The inputs of the simulator routine revision. Each element can be either a constant or a timeseries input. + outputs: The outputs of the simulator routine revision. + logical_check: Logical check configuration. + steady_state_detection: Steady state detection configuration. + schedule: Schedule configuration. + data_sampling: Data sampling configuration. Learn more about data sampling . """ inputs: SimulatorRoutineInputList | None @@ -427,7 +427,7 @@ class SimulatorRoutineStepArguments(CogniteResource, dict, MutableMapping[str, s For "Get" and "Set" step type the reference ID is required. Args: - data (dict[str, str]): The step arguments. + data: The step arguments. """ def __init__(self, data: dict[str, str]) -> None: @@ -447,10 +447,10 @@ class SimulatorRoutineStep(CogniteResource): The step of the simulator routine revision. Args: - step_type (str): The type of the step. Can be "Get", "Set", or "Command". - arguments (SimulatorRoutineStepArguments): The arguments of the step. - order (int): Represents the order in which the step is executed compared to other steps in the stage. - description (str | None): The description of the step. + step_type: The type of the step. Can be "Get", "Set", or "Command". + arguments: The arguments of the step. + order: Represents the order in which the step is executed compared to other steps in the stage. + description: The description of the step. """ step_type: Literal["Get", "Set", "Command"] @@ -482,9 +482,9 @@ class SimulatorRoutineStage(CogniteResource): The stage of the simulator routine revision. This is a way to organize the steps of the simulator routine revision. 
Args: - order (int): Represents the order in which the stage is executed compared to other stages in the script. - steps (list[SimulatorRoutineStep]): The steps of the stage. - description (str | None): The description of the stage. + order: Represents the order in which the stage is executed compared to other stages in the script. + steps: The steps of the stage. + description: The description of the stage. """ order: int @@ -540,10 +540,10 @@ class SimulatorRoutineRevisionWrite(SimulatorRoutineRevisionCore): This is a writeable version of a simulator routine revision, it is used when creating simulator routine revisions. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - routine_external_id (str): The external ID of the simulator routine. - configuration (SimulatorRoutineConfiguration | None): The configuration of the simulator routine revision. - script (SimulatorRoutineStageList | Sequence[SimulatorRoutineStage] | None): The script of the simulator routine revision. + external_id: The external ID provided by the client. Must be unique for the resource type. + routine_external_id: The external ID of the simulator routine. + configuration: The configuration of the simulator routine revision. + script: The script of the simulator routine revision. """ @@ -584,19 +584,19 @@ class SimulatorRoutineRevision(SimulatorRoutineRevisionCore): Each model can have multiple routines, each performing different objectives such as calculating optimal operation setpoints, forecasting production, benchmarking asset performance, and more. Args: - id (int): The unique identifier of the simulator routine revision. - external_id (str): The external ID provided by the client. Must be unique for the resource type. - simulator_external_id (str): The external ID of the simulator. - simulator_integration_external_id (str | None): The external ID of the simulator integration. 
- routine_external_id (str): The external ID of the simulator routine. - model_external_id (str): The external ID of the simulator model. - version_number (int): The version number of the simulator routine revision. Unique for each simulator routine. - created_time (int): The timestamp of when the simulator routine revision was created. - data_set_id (int): The ID of the data set associated with the simulator routine revision. - created_by_user_id (str): The ID of the user who created the simulator routine revision. - configuration (SimulatorRoutineConfiguration | None): The configuration of the simulator routine revision. - script (SimulatorRoutineStageList | Sequence[SimulatorRoutineStage] | None): The script of the simulator routine revision. - kind (Literal['long'] | None): The kind of simulator routine. Routines with kind 'long' may have more inputs/outputs, steps, and longer runtime. + id: The unique identifier of the simulator routine revision. + external_id: The external ID provided by the client. Must be unique for the resource type. + simulator_external_id: The external ID of the simulator. + simulator_integration_external_id: The external ID of the simulator integration. + routine_external_id: The external ID of the simulator routine. + model_external_id: The external ID of the simulator model. + version_number: The version number of the simulator routine revision. Unique for each simulator routine. + created_time: The timestamp of when the simulator routine revision was created. + data_set_id: The ID of the data set associated with the simulator routine revision. + created_by_user_id: The ID of the user who created the simulator routine revision. + configuration: The configuration of the simulator routine revision. + script: The script of the simulator routine revision. + kind: The kind of simulator routine. Routines with kind 'long' may have more inputs/outputs, steps, and longer runtime. 
""" def __init__( @@ -688,7 +688,7 @@ def to_pandas(self) -> pandas.DataFrame: """Convert the list of stages to a pandas DataFrame. Returns: - pandas.DataFrame: DataFrame with stage and step information. + DataFrame with stage and step information. """ pd = local_import("pandas") if not self.data: @@ -735,7 +735,7 @@ def to_pandas(self) -> pandas.DataFrame: """Convert the list of inputs to a pandas DataFrame. Returns: - pandas.DataFrame: DataFrame with input information. + DataFrame with input information. """ pd = local_import("pandas") if not self.data: @@ -775,7 +775,7 @@ def to_pandas(self) -> pandas.DataFrame: """Convert the list of outputs to a pandas DataFrame. Returns: - pandas.DataFrame: DataFrame with output information. + DataFrame with output information. """ pd = local_import("pandas") if not self.data: diff --git a/cognite/client/data_classes/simulators/routines.py b/cognite/client/data_classes/simulators/routines.py index 13baf7aa77..0775af9127 100644 --- a/cognite/client/data_classes/simulators/routines.py +++ b/cognite/client/data_classes/simulators/routines.py @@ -25,12 +25,12 @@ class SimulatorRoutineCore(WriteableCogniteResource["SimulatorRoutineWrite"], AB This is the read/response format of a simulator routine. Args: - external_id (str): External id of the simulator routine - model_external_id (str): External id of the associated simulator model - simulator_integration_external_id (str | None): External id of the associated simulator integration - name (str): The name of the simulator routine - description (str | None): The description of the simulator routine - kind (Literal['long'] | None): The kind of simulator routine. Routines with kind 'long' may have more inputs/outputs, steps, and longer runtime. 
+ external_id: External id of the simulator routine + model_external_id: External id of the associated simulator model + simulator_integration_external_id: External id of the associated simulator integration + name: The name of the simulator routine + description: The description of the simulator routine + kind: The kind of simulator routine. Routines with kind 'long' may have more inputs/outputs, steps, and longer runtime. """ def __init__( @@ -72,12 +72,12 @@ class SimulatorRoutineWrite(SimulatorRoutineCore): This is the read/response format of a simulator routine. Args: - external_id (str): External id of the simulator routine - model_external_id (str): External id of the associated simulator model - simulator_integration_external_id (str): External id of the associated simulator integration - name (str): The name of the simulator routine - description (str | None): The description of the simulator routine - kind (Literal['long'] | None): The kind of simulator routine. Routines with kind 'long' may have more inputs/outputs, steps, and longer runtime. + external_id: External id of the simulator routine + model_external_id: External id of the associated simulator model + simulator_integration_external_id: External id of the associated simulator integration + name: The name of the simulator routine + description: The description of the simulator routine + kind: The kind of simulator routine. Routines with kind 'long' may have more inputs/outputs, steps, and longer runtime. """ def as_write(self) -> SimulatorRoutineWrite: @@ -96,17 +96,17 @@ class SimulatorRoutine(SimulatorRoutineCore): This is the read/response format of a simulator routine. 
Args: - id (int): A unique id of a simulator routine - external_id (str): External id of the simulator routine - model_external_id (str): External id of the associated simulator model - simulator_integration_external_id (str | None): External id of the associated simulator integration - name (str): The name of the simulator routine - data_set_id (int): The id of the dataset associated with the simulator routine - simulator_external_id (str): External id of the associated simulator - created_time (int): The time when the simulator routine was created - last_updated_time (int): The time when the simulator routine was last updated - description (str | None): The description of the simulator routine - kind (Literal['long'] | None): The kind of simulator routine. Routines with kind 'long' may have more inputs/outputs, steps, and longer runtime. + id: A unique id of a simulator routine + external_id: External id of the simulator routine + model_external_id: External id of the associated simulator model + simulator_integration_external_id: External id of the associated simulator integration + name: The name of the simulator routine + data_set_id: The id of the dataset associated with the simulator routine + simulator_external_id: External id of the associated simulator + created_time: The time when the simulator routine was created + last_updated_time: The time when the simulator routine was last updated + description: The description of the simulator routine + kind: The kind of simulator routine. Routines with kind 'long' may have more inputs/outputs, steps, and longer runtime. """ def __init__( diff --git a/cognite/client/data_classes/simulators/runs.py b/cognite/client/data_classes/simulators/runs.py index ac5046b842..8d3658f7ee 100644 --- a/cognite/client/data_classes/simulators/runs.py +++ b/cognite/client/data_classes/simulators/runs.py @@ -88,17 +88,14 @@ class SimulationRunWrite(WriteableCogniteResource["SimulationRunWrite"]): 2. 
By routine revision external ID + model revision external ID Args: - routine_external_id (str | None): External id of the associated simulator routine. - Cannot be specified together with routine_revision_external_id and model_revision_external_id. - routine_revision_external_id (str | None): External id of the associated simulator routine revision. - Must be specified together with model_revision_external_id. - model_revision_external_id (str | None): External id of the associated simulator model revision. - Must be specified together with routine_revision_external_id. - run_type (str | None): The type of the simulation run - run_time (int | None): Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling. - queue (bool | None): Queue the simulation run when connector is down. - log_severity (str | None): Override the minimum severity level for the simulation run logs. If not provided, the minimum severity is read from the connector logger configuration. - inputs (list[SimulationInputOverride] | None): List of input overrides + routine_external_id: External id of the associated simulator routine. Cannot be specified together with routine_revision_external_id and model_revision_external_id. + routine_revision_external_id: External id of the associated simulator routine revision. Must be specified together with model_revision_external_id. + model_revision_external_id: External id of the associated simulator model revision. Must be specified together with routine_revision_external_id. + run_type: The type of the simulation run + run_time: Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling. + queue: Queue the simulation run when connector is down. + log_severity: Override the minimum severity level for the simulation run logs. If not provided, the minimum severity is read from the connector logger configuration. 
+ inputs: List of input overrides """ def __init__( @@ -188,23 +185,23 @@ class SimulationRun(WriteableCogniteResourceWithClientRef["SimulationRunWrite"]) This is the read/response format of a simulation run. Args: - id (int): The id of the simulation run - simulator_external_id (str): External id of the associated simulator - simulator_integration_external_id (str | None): External id of the associated simulator integration - model_external_id (str): External id of the associated simulator model - model_revision_external_id (str): External id of the associated simulator model revision - routine_revision_external_id (str): External id of the associated simulator routine revision - routine_external_id (str): External id of the associated simulator routine - run_type (Literal['external', 'manual', 'scheduled']): The type of the simulation run - status (Literal['ready', 'running', 'success', 'failure']): The status of the simulation run - data_set_id (int): The id of the dataset associated with the simulation run - user_id (str): The id of the user who executed the simulation run - log_id (int): The id of the log associated with the simulation run - created_time (int): The number of milliseconds since epoch - last_updated_time (int): The number of milliseconds since epoch - status_message (str | None): The status message of the simulation run - simulation_time (int | None): Simulation time in milliseconds. Timestamp when the input data was sampled. Used for indexing input and output time series. - run_time (int | None): Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling. 
+ id: The id of the simulation run + simulator_external_id: External id of the associated simulator + simulator_integration_external_id: External id of the associated simulator integration + model_external_id: External id of the associated simulator model + model_revision_external_id: External id of the associated simulator model revision + routine_revision_external_id: External id of the associated simulator routine revision + routine_external_id: External id of the associated simulator routine + run_type: The type of the simulation run + status: The status of the simulation run + data_set_id: The id of the dataset associated with the simulation run + user_id: The id of the user who executed the simulation run + log_id: The id of the log associated with the simulation run + created_time: The number of milliseconds since epoch + last_updated_time: The number of milliseconds since epoch + status_message: The status message of the simulation run + simulation_time: Simulation time in milliseconds. Timestamp when the input data was sampled. Used for indexing input and output time series. + run_time: Run time in milliseconds. Reference timestamp used for data pre-processing and data sampling. """ @@ -257,7 +254,7 @@ async def get_logs_async(self) -> SimulatorLog | None: """`Retrieve logs for this simulation run. `_ Returns: - SimulatorLog | None: Log for the simulation run. + Log for the simulation run. """ return await self._cognite_client.simulators.logs.retrieve(ids=self.log_id) @@ -269,7 +266,7 @@ async def get_data_async(self) -> SimulationRunDataItem | None: """`Retrieve data associated with this simulation run. `_ Returns: - SimulationRunDataItem | None: Data for the simulation run. + Data for the simulation run. 
""" data = await self._cognite_client.simulators.runs.list_run_data(run_id=self.id) if data: @@ -300,7 +297,7 @@ async def wait_async(self, timeout: float = 60) -> None: This is generally not needed to call directly, as client.simulators.routines.run(...) will wait for the simulation to finish by default. Args: - timeout (float): Time out after this many seconds. Defaults to 60 seconds. + timeout: Time out after this many seconds. Defaults to 60 seconds. """ end_time = time.time() + timeout @@ -478,7 +475,7 @@ def to_pandas( # type: ignore [override] """Convert the simulation run data to a pandas DataFrame. Returns: - pandas.DataFrame: The dataframe. + The dataframe. """ pd = local_import("pandas") @@ -517,7 +514,7 @@ def to_pandas( # type: ignore [override] """Convert the simulation run data list to a pandas DataFrame. Returns: - pandas.DataFrame: The dataframe. + The dataframe. """ pd = local_import("pandas") return pd.concat([item.to_pandas() for item in self.data], ignore_index=True) diff --git a/cognite/client/data_classes/simulators/simulators.py b/cognite/client/data_classes/simulators/simulators.py index af8fee6df4..b886f107fa 100644 --- a/cognite/client/data_classes/simulators/simulators.py +++ b/cognite/client/data_classes/simulators/simulators.py @@ -24,14 +24,14 @@ class Simulator(CogniteResource): This is the read/response format of the simulator. Args: - external_id (str): External id of the simulator - id (int): Id of the simulator. 
- name (str): Name of the simulator - file_extension_types (Sequence[str]): File extension types supported by the simulator - model_types (Sequence[SimulatorModelType] | None): Model types supported by the simulator - model_dependencies (Sequence[SimulatorModelDependency] | None): Model dependencies supported by the simulator - step_fields (Sequence[SimulatorStep] | None): Step types supported by the simulator when creating routines - unit_quantities (Sequence[SimulatorQuantity] | None): Quantities and their units supported by the simulator + external_id: External id of the simulator + id: Id of the simulator. + name: Name of the simulator + file_extension_types: File extension types supported by the simulator + model_types: Model types supported by the simulator + model_dependencies: Model dependencies supported by the simulator + step_fields: Step types supported by the simulator when creating routines + unit_quantities: Quantities and their units supported by the simulator """ @@ -104,7 +104,7 @@ def get_quantities(self) -> list[str]: """Get a list of quantity names available for this simulator. Returns: - list[str]: List of quantity names from the simulator's unit_quantities. + List of quantity names from the simulator's unit_quantities. """ if not self.unit_quantities: return [] @@ -114,10 +114,10 @@ def get_units(self, quantity: str) -> list[str]: """Get a list of unit names for a specific quantity. Args: - quantity (str): The name of the quantity to get units for. + quantity: The name of the quantity to get units for. Returns: - list[str]: List of unit names for the specified quantity. + List of unit names for the specified quantity. Raises: ValueError: If the specified quantity does not exist for this simulator. @@ -239,9 +239,9 @@ class SimulatorModelDependencyFields(CogniteResource): """ Represents the fields supported by the simulator for external dependencies. Args: - name (str): The name of the field. - label (str): The label of the field. 
- info (str): Additional information about the field. + name: The name of the field. + label: The label of the field. + info: Additional information about the field. """ name: str @@ -262,8 +262,8 @@ class SimulatorModelDependency(CogniteResource): """ Defines the simulator model dependency, specifying the supported fields and file extension types compatible with the simulator. Args: - file_extension_types (Sequence[str]): A list of file extension types supported by the simulator for external dependencies. - fields (Sequence[SimulatorModelDependencyFields]): A list of supported fields. + file_extension_types: A list of file extension types supported by the simulator for external dependencies. + fields: A list of supported fields. """ file_extension_types: Sequence[str] @@ -341,21 +341,21 @@ class SimulatorIntegration(CogniteResource): This is the read/response format of the simulator integration. Args: - id (int): Id of the simulator integration. - external_id (str): External id of the simulator integration - simulator_external_id (str): External id of the associated simulator - heartbeat (int): The interval in seconds between the last heartbeat and the current time - data_set_id (int): The id of the dataset associated with the simulator integration - connector_version (str): The version of the connector - log_id (int): Id of the log associated with this simulator integration. - active (bool): Indicates if the simulator integration is active (i.e., a connector is linked to CDF for this integration). - created_time (int): The time when this simulator integration resource was created. - last_updated_time (int): The last time the simulator integration resource was updated. 
- license_status (str | None): The status of the license - simulator_version (str | None): The version of the simulator - license_last_checked_time (int | None): The time when the license was last checked - connector_status (str | None): The status of the connector - connector_status_updated_time (int | None): The time when the connector status was last updated + id: Id of the simulator integration. + external_id: External id of the simulator integration + simulator_external_id: External id of the associated simulator + heartbeat: The interval in seconds between the last heartbeat and the current time + data_set_id: The id of the dataset associated with the simulator integration + connector_version: The version of the connector + log_id: Id of the log associated with this simulator integration. + active: Indicates if the simulator integration is active (i.e., a connector is linked to CDF for this integration). + created_time: The time when this simulator integration resource was created. + last_updated_time: The last time the simulator integration resource was updated. + license_status: The status of the license + simulator_version: The version of the simulator + license_last_checked_time: The time when the license was last checked + connector_status: The status of the connector + connector_status_updated_time: The time when the connector status was last updated """ def __init__( diff --git a/cognite/client/data_classes/three_d.py b/cognite/client/data_classes/three_d.py index 6e12e44582..de9144e0b0 100644 --- a/cognite/client/data_classes/three_d.py +++ b/cognite/client/data_classes/three_d.py @@ -25,8 +25,8 @@ class RevisionCameraProperties(CogniteResource): """Initial camera position and target. Args: - target (list[float]): Initial camera target. - position (list[float]): Initial camera position. + target: Initial camera target. + position: Initial camera position. 
""" def __init__(self, target: list[float], position: list[float]) -> None: @@ -45,8 +45,8 @@ class BoundingBox3D(CogniteResource): """The bounding box of the subtree with this sector as the root sector. Is null if there are no geometries in the subtree. Args: - max (list[float]): No description. - min (list[float]): No description. + max: No description. + min: No description. """ def __init__(self, max: list[float], min: list[float]) -> None: @@ -66,9 +66,9 @@ class ThreeDModelCore(WriteableCogniteResource["ThreeDModelWrite"], ABC): Args: - name (str): The name of the model. - data_set_id (int | None): The id of the dataset this 3D model belongs to. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + name: The name of the model. + data_set_id: The id of the dataset this 3D model belongs to. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. """ def __init__( @@ -87,11 +87,11 @@ class ThreeDModel(ThreeDModelCore): This is the read version of ThreeDModel, which is used when retrieving 3D models. Args: - name (str): The name of the model. - id (int): The ID of the model. - created_time (int): The creation time of the resource, in milliseconds since January 1, 1970 at 00:00 UTC. - data_set_id (int | None): The id of the dataset this 3D model belongs to. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + name: The name of the model. + id: The ID of the model. + created_time: The creation time of the resource, in milliseconds since January 1, 1970 at 00:00 UTC. + data_set_id: The id of the dataset this 3D model belongs to. + metadata: Custom, application-specific metadata. 
String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. """ def __init__( @@ -137,9 +137,9 @@ class ThreeDModelWrite(ThreeDModelCore): Args: - name (str): The name of the model. - data_set_id (int | None): The id of the dataset this 3D model belongs to. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + name: The name of the model. + data_set_id: The id of the dataset this 3D model belongs to. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. """ def __init__( @@ -171,7 +171,7 @@ class ThreeDModelUpdate(CogniteUpdate): """No description. Args: - id (int): A server-generated ID for the object. + id: A server-generated ID for the object. """ class _PrimitiveThreeDModelUpdate(CognitePrimitiveUpdate): @@ -244,13 +244,13 @@ class ThreeDModelRevisionCore(WriteableCogniteResource["ThreeDModelRevisionWrite """No description. Args: - file_id (int | None): The file id. - published (bool | None): True if the revision is marked as published. - rotation (list[float] | None): No description. - scale (list[float] | None): Scale of 3D model in directions X,Y and Z. Should be uniform. - translation (list[float] | None): 3D offset of the model. - camera (RevisionCameraProperties | dict[str, Any] | None): Initial camera position and target. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + file_id: The file id. + published: True if the revision is marked as published. + rotation: No description. + scale: Scale of 3D model in directions X,Y and Z. Should be uniform. + translation: 3D offset of the model. 
+ camera: Initial camera position and target. + metadata: Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. """ def __init__( @@ -295,19 +295,19 @@ class ThreeDModelRevision(ThreeDModelRevisionCore): This is the read version of ThreeDModelRevision, which is used when retrieving 3D model revisions. Args: - id (int): The ID of the revision. - file_id (int): The file id. - published (bool): True if the revision is marked as published. - rotation (list[float] | None): No description. - scale (list[float] | None): Scale of 3D model in directions X,Y and Z. Should be uniform. - translation (list[float] | None): 3D offset of the model. - camera (RevisionCameraProperties | None): Initial camera position and target. - status (str): The status of the revision. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. - thumbnail_threed_file_id (int | None): The threed file ID of a thumbnail for the revision. Use /3d/files/{id} to retrieve the file. - thumbnail_url (str | None): The URL of a thumbnail for the revision. - asset_mapping_count (int): The number of asset mappings for this revision. - created_time (int): The creation time of the resource, in milliseconds since January 1, 1970 at 00:00 UTC. + id: The ID of the revision. + file_id: The file id. + published: True if the revision is marked as published. + rotation: No description. + scale: Scale of 3D model in directions X,Y and Z. Should be uniform. + translation: 3D offset of the model. + camera: Initial camera position and target. + status: The status of the revision. + metadata: Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. 
+ thumbnail_threed_file_id: The threed file ID of a thumbnail for the revision. Use /3d/files/{id} to retrieve the file. + thumbnail_url: The URL of a thumbnail for the revision. + asset_mapping_count: The number of asset mappings for this revision. + created_time: The creation time of the resource, in milliseconds since January 1, 1970 at 00:00 UTC. """ def __init__( @@ -380,13 +380,13 @@ class ThreeDModelRevisionWrite(ThreeDModelRevisionCore): This is the write version of ThreeDModelRevision, which is used when creating 3D model revisions. Args: - file_id (int): The file id to a file uploaded to Cognite's Files API. Can only be set on revision creation, and can never be updated. - published (bool): True if the revision is marked as published. - rotation (list[float] | None): No description. - scale (list[float] | None): Scale of 3D model in directions X,Y and Z. Should be uniform. - translation (list[float] | None): 3D offset of the model. - camera (RevisionCameraProperties | dict[str, Any] | None): Initial camera position and target. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + file_id: The file id to a file uploaded to Cognite's Files API. Can only be set on revision creation, and can never be updated. + published: True if the revision is marked as published. + rotation: No description. + scale: Scale of 3D model in directions X,Y and Z. Should be uniform. + translation: 3D offset of the model. + camera: Initial camera position and target. + metadata: Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. """ def __init__( @@ -430,7 +430,7 @@ class ThreeDModelRevisionUpdate(CogniteUpdate): """No description. Args: - id (int): A server-generated ID for the object. + id: A server-generated ID for the object. 
""" class _PrimitiveThreeDModelRevisionUpdate(CognitePrimitiveUpdate): @@ -518,14 +518,14 @@ class ThreeDNode(CogniteResource): """No description. Args: - id (int): The ID of the node. - tree_index (int): The index of the node in the 3D model hierarchy, starting from 0. The tree is traversed in a depth-first order. - parent_id (int | None): The parent of the node, null if it is the root node. - depth (int): The depth of the node in the tree, starting from 0 at the root node. - name (str): The name of the node. - subtree_size (int): The number of descendants of the node, plus one (counting itself). - properties (dict[str, dict[str, str]] | None): Properties extracted from 3D model, with property categories containing key/value string pairs. - bounding_box (BoundingBox3D | None): The bounding box of the subtree with this sector as the root sector. Is null if there are no geometries in the subtree. + id: The ID of the node. + tree_index: The index of the node in the 3D model hierarchy, starting from 0. The tree is traversed in a depth-first order. + parent_id: The parent of the node, null if it is the root node. + depth: The depth of the node in the tree, starting from 0 at the root node. + name: The name of the node. + subtree_size: The number of descendants of the node, plus one (counting itself). + properties: Properties extracted from 3D model, with property categories containing key/value string pairs. + bounding_box: The bounding box of the subtree with this sector as the root sector. Is null if there are no geometries in the subtree. """ def __init__( @@ -576,8 +576,8 @@ class ThreeDAssetMappingCore(WriteableCogniteResource["ThreeDAssetMappingWrite"] """No description. Args: - node_id (int): The ID of the node. - asset_id (int | None): The ID of the associated asset (Cognite's Assets API). + node_id: The ID of the node. + asset_id: The ID of the associated asset (Cognite's Assets API). 
""" def __init__( @@ -594,10 +594,10 @@ class ThreeDAssetMapping(ThreeDAssetMappingCore): This is the read version of ThreeDAssetMapping, which is used when retrieving 3D asset mappings. Args: - node_id (int): The ID of the node. - asset_id (int | None): The ID of the associated asset (Cognite's Assets API). - tree_index (int | None): A number describing the position of this node in the 3D hierarchy, starting from 0. The tree is traversed in a depth-first order. - subtree_size (int | None): The number of nodes in the subtree of this node (this number included the node itself). + node_id: The ID of the node. + asset_id: The ID of the associated asset (Cognite's Assets API). + tree_index: A number describing the position of this node in the 3D hierarchy, starting from 0. The tree is traversed in a depth-first order. + subtree_size: The number of nodes in the subtree of this node (this number included the node itself). """ def __init__( @@ -638,8 +638,8 @@ class ThreeDAssetMappingWrite(ThreeDAssetMappingCore): This is the write version of ThreeDAssetMapping, which is used when creating 3D asset mappings. Args: - node_id (int): The ID of the node. - asset_id (int | None): The ID of the associated asset (Cognite's Assets API). + node_id: The ID of the node. + asset_id: The ID of the associated asset (Cognite's Assets API). """ def __init__(self, node_id: int, asset_id: int | None = None) -> None: diff --git a/cognite/client/data_classes/time_series.py b/cognite/client/data_classes/time_series.py index b53a8d1796..822f73d43e 100644 --- a/cognite/client/data_classes/time_series.py +++ b/cognite/client/data_classes/time_series.py @@ -43,21 +43,21 @@ class TimeSeries(WriteableCogniteResourceWithClientRef["TimeSeriesWrite"]): of TimesSeries, which is used when retrieving from CDF. Args: - id (int): A server-generated ID for the object. 
- created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - is_step (bool): Whether the time series is a step series or not. - is_string (bool): Whether the time series is string valued or not. - external_id (str | None): The externally supplied ID for the time series. - instance_id (NodeId | None): The Instance ID for the time series. (Only applicable for time series created in DMS) - name (str | None): The display short name of the time series. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. - unit (str | None): The physical unit of the time series. - unit_external_id (str | None): The physical unit of the time series (reference to unit catalog). Only available for numeric time series. - asset_id (int | None): Asset ID of equipment linked to this time series. - description (str | None): Description of the time series. - security_categories (Sequence[int] | None): The required security categories to access this time series. - data_set_id (int | None): The dataSet ID for the item. + id: A server-generated ID for the object. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + is_step: Whether the time series is a step series or not. + is_string: Whether the time series is string valued or not. + external_id: The externally supplied ID for the time series. + instance_id: The Instance ID for the time series. 
(Only applicable for time series created in DMS) + name: The display short name of the time series. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + unit: The physical unit of the time series. + unit_external_id: The physical unit of the time series (reference to unit catalog). Only available for numeric time series. + asset_id: Asset ID of equipment linked to this time series. + description: Description of the time series. + security_categories: The required security categories to access this time series. + data_set_id: The dataSet ID for the item. """ def __init__( @@ -145,13 +145,13 @@ async def count_async(self) -> int: This result may not be completely accurate, as it is based on aggregates which may be occasionally out of date. Returns: - int: The number of datapoints in this time series. + The number of datapoints in this time series. Raises: RuntimeError: If the time series is string, as count aggregate is only supported for numeric data Returns: - int: The total number of datapoints + The total number of datapoints """ if self.is_string: raise RuntimeError("String time series does not support count aggregate.") @@ -171,9 +171,9 @@ async def latest_async(self, before: int | str | datetime | None = None) -> Data """Returns the latest datapoint in this time series. If empty, returns None. Args: - before (int | str | datetime | None): Get latest datapoint before this time. + before: Get latest datapoint before this time. Returns: - Datapoint | None: A datapoint object containing the value and timestamp of the latest datapoint. + A datapoint object containing the value and timestamp of the latest datapoint. 
""" identifier = Identifier.load(self.id, self.external_id, self.instance_id).as_dict() if dps := await self._cognite_client.time_series.data.retrieve_latest(**identifier, before=before): @@ -188,7 +188,7 @@ async def first_async(self) -> Datapoint | None: """Returns the first datapoint in this time series. If empty, returns None. Returns: - Datapoint | None: A datapoint object containing the value and timestamp of the first datapoint. + A datapoint object containing the value and timestamp of the first datapoint. """ identifier = Identifier.load(self.id, self.external_id, self.instance_id).as_dict() dps = await self._cognite_client.time_series.data.retrieve( @@ -206,7 +206,7 @@ async def asset_async(self) -> Asset: """Returns the asset this time series belongs to. Returns: - Asset: The asset given by its `asset_id`. + The asset given by its `asset_id`. Raises: ValueError: If asset_id is missing. """ @@ -223,18 +223,18 @@ class TimeSeriesWrite(WriteableCogniteResource["TimeSeriesWrite"]): """This is the write version of TimeSeries, which is used when writing to CDF. Args: - external_id (str | None): The externally supplied ID for the time series. - instance_id (NodeId | None): The Instance ID for the time series. (Only applicable for time series created in DMS) - name (str | None): The display short name of the time series. - is_string (bool | None): Whether the time series is string valued or not. - metadata (dict[str, str] | None): Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. - unit (str | None): The physical unit of the time series. - unit_external_id (str | None): The physical unit of the time series (reference to unit catalog). Only available for numeric time series. - asset_id (int | None): Asset ID of equipment linked to this time series. - is_step (bool | None): Whether the time series is a step series or not. 
- description (str | None): Description of the time series. - security_categories (Sequence[int] | None): The required security categories to access this time series. - data_set_id (int | None): The dataSet ID for the item. + external_id: The externally supplied ID for the time series. + instance_id: The Instance ID for the time series. (Only applicable for time series created in DMS) + name: The display short name of the time series. + is_string: Whether the time series is string valued or not. + metadata: Custom, application-specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + unit: The physical unit of the time series. + unit_external_id: The physical unit of the time series (reference to unit catalog). Only available for numeric time series. + asset_id: Asset ID of equipment linked to this time series. + is_step: Whether the time series is a step series or not. + description: Description of the time series. + security_categories: The required security categories to access this time series. + data_set_id: The dataSet ID for the item. """ def __init__( @@ -299,20 +299,20 @@ class TimeSeriesFilter(CogniteFilter): """No description. Args: - name (str | None): Filter on name. - unit (str | None): Filter on unit. - unit_external_id (str | None): Filter on unit external ID. - unit_quantity (str | None): Filter on unit quantity. - is_string (bool | None): Filter on isString. - is_step (bool | None): Filter on isStep. - metadata (dict[str, str] | None): Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. - asset_ids (Sequence[int] | None): Only include time series that reference these specific asset IDs. - asset_external_ids (SequenceNotStr[str] | None): Asset External IDs of related equipment that this time series relates to. 
- asset_subtree_ids (Sequence[dict[str, Any]] | None): Only include time series that are related to an asset in a subtree rooted at any of these asset IDs or external IDs. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. - data_set_ids (Sequence[dict[str, Any]] | None): No description. - external_id_prefix (str | None): Filter by this (case-sensitive) prefix for the external ID. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps. + name: Filter on name. + unit: Filter on unit. + unit_external_id: Filter on unit external ID. + unit_quantity: Filter on unit quantity. + is_string: Filter on isString. + is_step: Filter on isStep. + metadata: Custom, application specific metadata. String key -> String value. Limits: Maximum length of key is 32 bytes, value 512 bytes, up to 16 key-value pairs. + asset_ids: Only include time series that reference these specific asset IDs. + asset_external_ids: Asset External IDs of related equipment that this time series relates to. + asset_subtree_ids: Only include time series that are related to an asset in a subtree rooted at any of these asset IDs or external IDs. If the total size of the given subtrees exceeds 100,000 assets, an error will be returned. + data_set_ids: No description. + external_id_prefix: Filter by this (case-sensitive) prefix for the external ID. + created_time: Range between two timestamps. + last_updated_time: Range between two timestamps. """ def __init__( @@ -352,9 +352,9 @@ class TimeSeriesUpdate(CogniteUpdate): """Changes will be applied to time series. Args: - id (int | None): A server-generated ID for the object. - external_id (str | None): The external ID provided by the client. Must be unique for the resource type. - instance_id (NodeId | None): The ID of the instance this time series belongs to. + id: A server-generated ID for the object. 
+ external_id: The external ID provided by the client. Must be unique for the resource type. + instance_id: The ID of the instance this time series belongs to. """ def __init__( diff --git a/cognite/client/data_classes/transformations/__init__.py b/cognite/client/data_classes/transformations/__init__.py index baebc04469..0c22a9819f 100644 --- a/cognite/client/data_classes/transformations/__init__.py +++ b/cognite/client/data_classes/transformations/__init__.py @@ -41,9 +41,9 @@ class SessionDetails: """Details of a source session. Args: - session_id (int | None): CDF source session ID - client_id (str | None): Idp source client ID - project_name (str | None): CDF source project name + session_id: CDF source session ID + client_id: Idp source client ID + project_name: CDF source project name """ def __init__( @@ -72,10 +72,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representation of the instance. + A dictionary representation of the instance. """ ret = vars(self) @@ -177,30 +177,30 @@ class Transformation(WriteableCogniteResourceWithClientRef["TransformationWrite" """The transformation resource allows transforming data in CDF. Args: - id (int): A server-generated ID for the object. - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str): The name of the Transformation. - query (str): SQL query of the transformation. - destination (TransformationDestination): see TransformationDestination for options. - conflict_mode (str): What to do in case of id collisions: either "abort", "upsert", "update" or "delete" - is_public (bool): Indicates if the transformation is visible to all in project or only to the owner. 
- ignore_null_fields (bool): Indicates how null values are handled on updates: ignore or set null. - source_oidc_credentials (OidcCredentials | None): Configure the transformation to authenticate with the given oidc credentials key on the destination. - destination_oidc_credentials (OidcCredentials | None): Configure the transformation to authenticate with the given oidc credentials on the destination. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - owner (str): Owner of the transformation: requester's identity. - owner_is_current_user (bool): Indicates if the transformation belongs to the current user. - running_job (TransformationJob | None): Details for the job of this transformation currently running. - last_finished_job (TransformationJob | None): Details for the last finished job of this transformation. - blocked (TransformationBlockedInfo | None): Provides reason and time if the transformation is blocked. - schedule (TransformationSchedule | None): Details for the schedule if the transformation is scheduled. - data_set_id (int | None): No description. - source_nonce (NonceCredentials | None): Single use credentials to bind to a CDF session for reading. - destination_nonce (NonceCredentials | None): Single use credentials to bind to a CDF session for writing. - source_session (SessionDetails | None): Details for the session used to read from the source project. - destination_session (SessionDetails | None): Details for the session used to write to the destination project. - tags (list[str] | None): No description. + id: A server-generated ID for the object. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the Transformation. 
+ query: SQL query of the transformation. + destination: see TransformationDestination for options. + conflict_mode: What to do in case of id collisions: either "abort", "upsert", "update" or "delete" + is_public: Indicates if the transformation is visible to all in project or only to the owner. + ignore_null_fields: Indicates how null values are handled on updates: ignore or set null. + source_oidc_credentials: Configure the transformation to authenticate with the given oidc credentials key on the destination. + destination_oidc_credentials: Configure the transformation to authenticate with the given oidc credentials on the destination. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + owner: Owner of the transformation: requester's identity. + owner_is_current_user: Indicates if the transformation belongs to the current user. + running_job: Details for the job of this transformation currently running. + last_finished_job: Details for the last finished job of this transformation. + blocked: Provides reason and time if the transformation is blocked. + schedule: Details for the schedule if the transformation is scheduled. + data_set_id: No description. + source_nonce: Single use credentials to bind to a CDF session for reading. + destination_nonce: Single use credentials to bind to a CDF session for writing. + source_session: Details for the session used to read from the source project. + destination_session: Details for the session used to write to the destination project. + tags: No description. """ def __init__( @@ -331,10 +331,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. 
+ camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representation of the instance. + A dictionary representation of the instance. """ ret = super().dump(camel_case=camel_case) @@ -346,10 +346,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: async def run_async(self, wait: bool = True, timeout: float | None = None) -> TransformationJob: """Run this transformation. Args: - wait (bool): Whether to wait for the transformation to finish. Defaults to True. - timeout (float | None): How long to wait for the transformation to finish, in seconds. If None, wait indefinitely. Only used if `wait` is True. Defaults to None. + wait: Whether to wait for the transformation to finish. Defaults to True. + timeout: How long to wait for the transformation to finish, in seconds. If None, wait indefinitely. Only used if `wait` is True. Defaults to None. Returns: - TransformationJob: The started transformation job. + The started transformation job. """ return await self._cognite_client.transformations.run(transformation_id=self.id, wait=wait, timeout=timeout) @@ -413,19 +413,19 @@ class TransformationWrite(WriteableCogniteResource["TransformationWrite"], _Tran """The transformation resource allows transforming data in CDF. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - name (str): The name of the Transformation. - ignore_null_fields (bool): Indicates how null values are handled on updates: ignore or set null. - query (str | None): SQL query of the transformation. - destination (TransformationDestination | None): see TransformationDestination for options. - conflict_mode (Literal['abort', 'delete', 'update', 'upsert'] | None): What to do in case of id collisions: either "abort", "upsert", "update" or "delete" - is_public (bool): Indicates if the transformation is visible to all in project or only to the owner. 
- source_oidc_credentials (OidcCredentials | None): Configure the transformation to authenticate with the given oidc credentials key on the destination. - destination_oidc_credentials (OidcCredentials | None): Configure the transformation to authenticate with the given oidc credentials on the destination. - data_set_id (int | None): No description. - source_nonce (NonceCredentials | None): Single use credentials to bind to a CDF session for reading. - destination_nonce (NonceCredentials | None): Single use credentials to bind to a CDF session for writing. - tags (list[str] | None): No description. + external_id: The external ID provided by the client. Must be unique for the resource type. + name: The name of the Transformation. + ignore_null_fields: Indicates how null values are handled on updates: ignore or set null. + query: SQL query of the transformation. + destination: see TransformationDestination for options. + conflict_mode: What to do in case of id collisions: either "abort", "upsert", "update" or "delete" + is_public: Indicates if the transformation is visible to all in project or only to the owner. + source_oidc_credentials: Configure the transformation to authenticate with the given oidc credentials key on the destination. + destination_oidc_credentials: Configure the transformation to authenticate with the given oidc credentials on the destination. + data_set_id: No description. + source_nonce: Single use credentials to bind to a CDF session for reading. + destination_nonce: Single use credentials to bind to a CDF session for writing. + tags: No description. """ def __init__( @@ -497,10 +497,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representation of the instance. 
+ A dictionary representation of the instance. """ ret = super().dump(camel_case=camel_case) @@ -518,8 +518,8 @@ class TransformationUpdate(CogniteUpdate): """Changes applied to transformation Args: - id (int): A server-generated ID for the object. - external_id (str): External Id provided by client. Should be unique within the project. + id: A server-generated ID for the object. + external_id: External Id provided by client. Should be unique within the project. """ class _PrimitiveTransformationUpdate(CognitePrimitiveUpdate): @@ -636,7 +636,7 @@ class ContainsAny(TagsFilter): """Return transformations that has one of the tags specified. Args: - tags (list[str] | None): The resource item contains at least one of the listed tags. The tags are defined by a list of external ids. + tags: The resource item contains at least one of the listed tags. The tags are defined by a list of external ids. Examples: @@ -658,17 +658,17 @@ class TransformationFilter(CogniteFilter): """No description. Args: - include_public (bool): Whether public transformations should be included in the results. The default is true. - name_regex (str | None): Regex expression to match the transformation name - query_regex (str | None): Regex expression to match the transformation query - destination_type (str | None): Transformation destination resource name to filter by. - conflict_mode (str | None): Filters by a selected transformation action type: abort/create, upsert, update, delete - cdf_project_name (str | None): Project name to filter by configured source and destination project - has_blocked_error (bool | None): Whether only the blocked transformations should be included in the results. - created_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - last_updated_time (dict[str, Any] | TimestampRange | None): Range between two timestamps - data_set_ids (list[dict[str, Any]] | None): Return only transformations in the specified data sets with these ids, e.g. 
[{"id": 1}, {"externalId": "foo"}]. - tags (TagsFilter | None): Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. + include_public: Whether public transformations should be included in the results. The default is true. + name_regex: Regex expression to match the transformation name + query_regex: Regex expression to match the transformation query + destination_type: Transformation destination resource name to filter by. + conflict_mode: Filters by a selected transformation action type: abort/create, upsert, update, delete + cdf_project_name: Project name to filter by configured source and destination project + has_blocked_error: Whether only the blocked transformations should be included in the results. + created_time: Range between two timestamps + last_updated_time: Range between two timestamps + data_set_ids: Return only transformations in the specified data sets with these ids, e.g. [{"id": 1}, {"externalId": "foo"}]. + tags: Return only the resource matching the specified tags constraints. It only supports ContainsAny as of now. """ def __init__( @@ -711,8 +711,8 @@ class TransformationPreviewResult(CogniteResource): """Allows previewing the result of a sql transformation before executing it. Args: - schema (TransformationSchemaColumnList): List of column descriptions. - results (list[dict]): List of resulting rows. Each row is a dictionary where the key is the column name and the value is the entry. + schema: List of column descriptions. + results: List of resulting rows. Each row is a dictionary where the key is the column name and the value is the entry. """ def __init__( @@ -734,10 +734,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. 
Returns: - dict[str, Any]: A dictionary representation of the instance. + A dictionary representation of the instance. """ return { "schema": {"items": self.schema.dump(camel_case=camel_case)}, diff --git a/cognite/client/data_classes/transformations/common.py b/cognite/client/data_classes/transformations/common.py index e201a37fbe..a646a88a5e 100644 --- a/cognite/client/data_classes/transformations/common.py +++ b/cognite/client/data_classes/transformations/common.py @@ -15,7 +15,7 @@ class TransformationDestination(CogniteResource): """TransformationDestination has static methods to define the target resource type of a transformation Args: - type (str): Used as data type identifier on transformation creation/retrieval. + type: Used as data type identifier on transformation creation/retrieval. """ def __init__(self, type: str) -> None: @@ -96,11 +96,11 @@ def raw(database: str = "", table: str = "") -> RawTable: """To be used when the transformation is meant to produce raw table rows. Args: - database (str): database name of the target raw table. - table (str): name of the target raw table + database: database name of the target raw table. + table: name of the target raw table Returns: - RawTable: TransformationDestination pointing to the target table + TransformationDestination pointing to the target table """ return RawTable(database=database, table=table) @@ -109,10 +109,10 @@ def sequence_rows(external_id: str = "") -> SequenceRowsDestination: """To be used when the transformation is meant to produce sequence rows. Args: - external_id (str): Sequence external id. + external_id: Sequence external id. 
Returns: - SequenceRowsDestination: TransformationDestination pointing to the target sequence rows + TransformationDestination pointing to the target sequence rows """ return SequenceRowsDestination(external_id=external_id) @@ -121,10 +121,10 @@ def nodes(view: ViewInfo | None = None, instance_space: str | None = None) -> No """ Args: - view (ViewInfo | None): information of the view. - instance_space (str | None): space id of the instance. + view: information of the view. + instance_space: space id of the instance. Returns: - Nodes: pointing to the target flexible data model. + pointing to the target flexible data model. """ return Nodes(view=view, instance_space=instance_space) @@ -137,11 +137,11 @@ def edges( """ Args: - view (ViewInfo | None): information of the view. - instance_space (str | None): space id of the instance. - edge_type (EdgeType | None): information about the type of the edge + view: information of the view. + instance_space: space id of the instance. + edge_type: information about the type of the edge Returns: - Edges: pointing to the target flexible data model. + pointing to the target flexible data model. """ return Edges(view=view, instance_space=instance_space, edge_type=edge_type) @@ -149,10 +149,10 @@ def edges( def instances(data_model: DataModelInfo | None = None, instance_space: str | None = None) -> Instances: """ Args: - data_model (DataModelInfo | None): information of the Data Model. - instance_space (str | None): space id of the instance. + data_model: information of the Data Model. + instance_space: space id of the instance. Returns: - Instances: pointing to the target centric data model. + pointing to the target centric data model. """ return Instances(data_model=data_model, instance_space=instance_space) @@ -331,12 +331,12 @@ class OidcCredentials: destination_oidc_credentials. Args: - client_id (str): Your application's client id. 
- client_secret (str): Your application's client secret - token_uri (str): OAuth token url - cdf_project_name (str): Name of CDF project - scopes (str | list[str] | None): A list of scopes or a comma-separated string (for backwards compatibility). - audience (str | None): Audience (optional) + client_id: Your application's client id. + client_secret: Your application's client secret + token_uri: OAuth token url + cdf_project_name: Name of CDF project + scopes: A list of scopes or a comma-separated string (for backwards compatibility). + audience: Audience (optional) """ def __init__( @@ -383,10 +383,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representation of the instance. + A dictionary representation of the instance. """ return basic_obj_dump(self, camel_case) @@ -395,9 +395,9 @@ def load(cls, data: dict[str, Any]) -> Self: """Load data into the instance. Args: - data (dict[str, Any]): A dictionary representation of the instance. + data: A dictionary representation of the instance. Returns: - Self: No description. + No description. """ return cls( client_id=data["clientId"], @@ -428,10 +428,10 @@ def dump(self, camel_case: bool = True) -> dict[str, Any]: """Dump the instance into a json serializable Python data type. Args: - camel_case (bool): Use camelCase for attribute names. Defaults to True. + camel_case: Use camelCase for attribute names. Defaults to True. Returns: - dict[str, Any]: A dictionary representation of the instance. + A dictionary representation of the instance. """ return basic_obj_dump(self, camel_case) @@ -440,9 +440,9 @@ def load(cls, data: dict[str, Any]) -> NonceCredentials: """Load data into the instance. 
Args: - data (dict[str, Any]): A dictionary representation of the instance. + data: A dictionary representation of the instance. Returns: - NonceCredentials: No description. + No description. """ return cls( session_id=data["sessionId"], @@ -459,8 +459,8 @@ class TransformationBlockedInfo: """Information about the reason why and when a transformation is blocked. Args: - reason (str): Reason why the transformation is blocked. - created_time (int): Timestamp when the transformation was blocked. + reason: Reason why the transformation is blocked. + created_time: Timestamp when the transformation was blocked. """ def __init__(self, reason: str, created_time: int) -> None: diff --git a/cognite/client/data_classes/transformations/jobs.py b/cognite/client/data_classes/transformations/jobs.py index de73e2f1d9..9414160a8b 100644 --- a/cognite/client/data_classes/transformations/jobs.py +++ b/cognite/client/data_classes/transformations/jobs.py @@ -29,9 +29,9 @@ class TransformationJobMetric(CogniteResource): """The transformation job metric resource allows following details of execution of a transformation run. Args: - timestamp (int): Time of the last metric update. - name (str): Name of the metric. - count (int): Value of the metric. + timestamp: Time of the last metric update. + name: Name of the metric. + count: Value of the metric. """ def __init__( @@ -61,21 +61,21 @@ class TransformationJob(CogniteResourceWithClientRef): """The transformation job resource allows following the status of execution of a transformation run. Args: - id (int): A server-generated ID for the object. - status (TransformationJobStatus): Status of the job. - transformation_id (int): Server-generated ID of the transformation. - transformation_external_id (str): external ID of the transformation. - source_project (str): Name of the CDF project the data will be read from. - destination_project (str): Name of the CDF project the data will be written to. 
- destination (TransformationDestination): No description. - conflict_mode (str): What to do in case of id collisions: either "abort", "upsert", "update" or "delete". - query (str): Query of the transformation that is being executed. - error (str | None): Error message from the server. - ignore_null_fields (bool): Indicates how null values are handled on updates: ignore or set null. - created_time (int | None): Time when the job was created. - started_time (int | None): Time when the job started running. - finished_time (int | None): Time when the job finished running. - last_seen_time (int | None): Time of the last status update from the job. + id: A server-generated ID for the object. + status: Status of the job. + transformation_id: Server-generated ID of the transformation. + transformation_external_id: external ID of the transformation. + source_project: Name of the CDF project the data will be read from. + destination_project: Name of the CDF project the data will be written to. + destination: No description. + conflict_mode: What to do in case of id collisions: either "abort", "upsert", "update" or "delete". + query: Query of the transformation that is being executed. + error: Error message from the server. + ignore_null_fields: Indicates how null values are handled on updates: ignore or set null. + created_time: Time when the job was created. + started_time: Time when the job started running. + finished_time: Time when the job finished running. + last_seen_time: Time of the last status update from the job. """ def __init__( @@ -156,11 +156,11 @@ async def wait_async(self, polling_interval: float = 5, timeout: float | None = """`Waits for the job to finish.` Args: - polling_interval (float): time (s) to wait between job status updates, default is one second. - timeout (float | None): maximum time (s) to wait, default is None (infinite time). Once the timeout is reached, it returns with the current status. 
+ polling_interval: time (s) to wait between job status updates, default is 5 seconds. + timeout: maximum time (s) to wait, default is None (infinite time). Once the timeout is reached, it returns with the current status. Returns: - TransformationJob: The transformation job (itself). + The transformation job (itself). Examples: Run transformations 1 and 2 in parallel, and run 3 once they finish successfully: @@ -251,8 +251,8 @@ class TransformationJobFilter(CogniteFilter): """TransformationJobFilter Args: - transformation_id (int | None): Filter jobs by transformation internal numeric ID. - transformation_external_id (str | None): Filter jobs by transformation external ID. + transformation_id: Filter jobs by transformation internal numeric ID. + transformation_external_id: Filter jobs by transformation external ID. """ def __init__(self, transformation_id: int | None = None, transformation_external_id: str | None = None) -> None: diff --git a/cognite/client/data_classes/transformations/notifications.py b/cognite/client/data_classes/transformations/notifications.py index 0c7a8c4b18..0a11c68f46 100644 --- a/cognite/client/data_classes/transformations/notifications.py +++ b/cognite/client/data_classes/transformations/notifications.py @@ -19,7 +19,7 @@ class TransformationNotificationCore(WriteableCogniteResource["TransformationNot """The transformation notification resource allows configuring email alerts on events related to a transformation run. Args: - destination (str): Email address where notifications should be sent. + destination: Email address where notifications should be sent. """ def __init__( @@ -34,12 +34,12 @@ class TransformationNotification(TransformationNotificationCore): This is the read format of a transformation notification. Args: - id (int): A server-generated ID for the object. - transformation_id (int): Transformation Id. - transformation_external_id (str): Transformation external Id. 
- destination (str): Email address where notifications should be sent. - created_time (int): Time when the notification was created. - last_updated_time (int): Time when the notification was last updated. + id: A server-generated ID for the object. + transformation_id: Transformation Id. + transformation_external_id: Transformation external Id. + destination: Email address where notifications should be sent. + created_time: Time when the notification was created. + last_updated_time: Time when the notification was last updated. """ def __init__( @@ -87,9 +87,9 @@ class TransformationNotificationWrite(TransformationNotificationCore): This is the write format of a transformation notification. Args: - destination (str): Email address where notifications should be sent. - transformation_id (int | None): Transformation ID. - transformation_external_id (str | None): Transformation external ID. + destination: Email address where notifications should be sent. + transformation_id: Transformation ID. + transformation_external_id: Transformation external ID. """ def __init__( @@ -136,9 +136,9 @@ class TransformationNotificationFilter(CogniteFilter): """TransformationNotificationFilter Args: - transformation_id (int | None): Filter by transformation internal numeric ID. - transformation_external_id (str | None): Filter by transformation externalId. - destination (str | None): Filter by notification destination. + transformation_id: Filter by transformation internal numeric ID. + transformation_external_id: Filter by transformation externalId. + destination: Filter by notification destination. 
""" def __init__( diff --git a/cognite/client/data_classes/transformations/schedules.py b/cognite/client/data_classes/transformations/schedules.py index af64b2744b..5b44310f60 100644 --- a/cognite/client/data_classes/transformations/schedules.py +++ b/cognite/client/data_classes/transformations/schedules.py @@ -22,8 +22,8 @@ class TransformationScheduleCore(WriteableCogniteResource["TransformationSchedul """The transformation schedules resource allows running recurrent transformations. Args: - interval (str | None): Cron expression controls when the transformation will be run. Use http://www.cronmaker.com to create one. - is_paused (bool): If true, the transformation is not scheduled. + interval: Cron expression controls when the transformation will be run. Use http://www.cronmaker.com to create one. + is_paused: If true, the transformation is not scheduled. """ def __init__(self, interval: str | None, is_paused: bool) -> None: @@ -35,12 +35,12 @@ class TransformationSchedule(TransformationScheduleCore): """The transformation schedules resource allows running recurrent transformations. Args: - id (int): Transformation id. - external_id (str): Transformation external id. - created_time (int): Time when the schedule was created. - last_updated_time (int): Time when the schedule was last updated. - interval (str): Cron expression controls when the transformation will be run. Use http://www.cronmaker.com to create one. - is_paused (bool): If true, the transformation is not scheduled. + id: Transformation id. + external_id: Transformation external id. + created_time: Time when the schedule was created. + last_updated_time: Time when the schedule was last updated. + interval: Cron expression controls when the transformation will be run. Use http://www.cronmaker.com to create one. + is_paused: If true, the transformation is not scheduled. 
""" def __init__( @@ -93,10 +93,10 @@ class TransformationScheduleWrite(TransformationScheduleCore): """The transformation schedules resource allows running recurrent transformations. Args: - interval (str): Cron expression controls when the transformation will be run. Use http://www.cronmaker.com to create one. - id (int | None): Transformation id. - external_id (str | None): Transformation external id. - is_paused (bool): If true, the transformation is not scheduled. + interval: Cron expression controls when the transformation will be run. Use http://www.cronmaker.com to create one. + id: Transformation id. + external_id: Transformation external id. + is_paused: If true, the transformation is not scheduled. """ def __init__( @@ -130,8 +130,8 @@ class TransformationScheduleUpdate(CogniteUpdate): """Changes applied to transformation schedule Args: - id (int): Transformation id. - external_id (str): Transformation externalId. + id: Transformation id. + external_id: Transformation externalId. """ class _PrimitiveTransformationScheduleUpdate(CognitePrimitiveUpdate): diff --git a/cognite/client/data_classes/transformations/schema.py b/cognite/client/data_classes/transformations/schema.py index 7d1a131bad..9d7800397f 100644 --- a/cognite/client/data_classes/transformations/schema.py +++ b/cognite/client/data_classes/transformations/schema.py @@ -88,10 +88,10 @@ class TransformationSchemaColumn(CogniteResource): """Represents a column of the expected sql structure for a destination type. Args: - name (str): Column name - sql_type (str): Type of the column in sql format. - type (TransformationSchemaType): Type of the column in json format. - nullable (bool): Values for the column can be null or not + name: Column name + sql_type: Type of the column in sql format. + type: Type of the column in json format. 
+ nullable: Values for the column can be null or not """ def __init__( diff --git a/cognite/client/data_classes/units.py b/cognite/client/data_classes/units.py index 79cab8cd85..d847e49436 100644 --- a/cognite/client/data_classes/units.py +++ b/cognite/client/data_classes/units.py @@ -20,8 +20,8 @@ class UnitConversion: The conversion between a unit and its base unit. Args: - multiplier (float): The multiplier to convert from the unit to the base unit. - offset (float): The offset to convert from the unit to the base unit. + multiplier: The multiplier to convert from the unit to the base unit. + offset: The offset to convert from the unit to the base unit. """ multiplier: float @@ -46,8 +46,8 @@ class UnitID(CogniteResource): Unit Identifier Args: - unit_external_id (str): External ID of the unit. - name (str): Name of the unit. + unit_external_id: External ID of the unit. + name: Name of the unit. """ def __init__(self, unit_external_id: str, name: str) -> None: @@ -67,16 +67,15 @@ class Unit(CogniteResource): This class represents a Unit in CDF. Args: - external_id (str): A unique identifier of the unit. - name (str): The name of the unit, e.g. DEG_C for Celsius. - long_name (str): A more descriptive name of the unit, e.g., degrees Celsius. - symbol (str): The symbol of the unit, e.g., °C. - alias_names (list[str]): List of alias names for the unit, e.g., Degree C, degC, °C, and so on. - quantity (str): The quantity of the unit, e.g., temperature. - conversion (UnitConversion): The conversion between the unit and its base unit. For example, the base unit for - temperature is Kelvin, and the conversion from Celsius to Kelvin is multiplier = 1, offset = 273.15. - source (str | None): The source of the unit, e.g., qudt.org - source_reference (str | None): The reference to the source of the unit, e.g., http://qudt.org/vocab/unit/DEG_C + external_id: A unique identifier of the unit. + name: The name of the unit, e.g. DEG_C for Celsius. 
+ long_name: A more descriptive name of the unit, e.g., degrees Celsius. + symbol: The symbol of the unit, e.g., °C. + alias_names: List of alias names for the unit, e.g., Degree C, degC, °C, and so on. + quantity: The quantity of the unit, e.g., temperature. + conversion: The conversion between the unit and its base unit. For example, the base unit for temperature is Kelvin, and the conversion from Celsius to Kelvin is multiplier = 1, offset = 273.15. + source: The source of the unit, e.g., qudt.org + source_reference: The reference to the source of the unit, e.g., http://qudt.org/vocab/unit/DEG_C """ def __init__( @@ -139,8 +138,8 @@ class UnitSystem(CogniteResource): This class represents a Unit System in CDF. Args: - name (str): The name of the unit system, e.g., SI and Imperial. - quantities (list[UnitID]): The quantities of the unit system, e.g., length, mass, and so on. + name: The name of the unit system, e.g., SI and Imperial. + quantities: The quantities of the unit system, e.g., length, mass, and so on. """ diff --git a/cognite/client/data_classes/user_profiles.py b/cognite/client/data_classes/user_profiles.py index 3cbb716c16..93b6908621 100644 --- a/cognite/client/data_classes/user_profiles.py +++ b/cognite/client/data_classes/user_profiles.py @@ -15,13 +15,13 @@ class UserProfile(CogniteResource): for principals based on data from the identity provider configured for the CDF project. Args: - user_identifier (str): Uniquely identifies the principal the profile is associated with. This property is guaranteed to be immutable. - last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. - given_name (str | None): The user's first name. - surname (str | None): The user's last name. - email (str | None): The user's email address (if any). The email address is is returned directly from the identity provider and not guaranteed to be verified. 
Note that the email is mutable and can be updated in the identity provider. It should not be used to uniquely identify as a user. Use the user_identifier property instead. - display_name (str | None): The display name for the user. - job_title (str | None): The user's job title. + user_identifier: Uniquely identifies the principal the profile is associated with. This property is guaranteed to be immutable. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + given_name: The user's first name. + surname: The user's last name. + email: The user's email address (if any). The email address is returned directly from the identity provider and not guaranteed to be verified. Note that the email is mutable and can be updated in the identity provider. It should not be used to uniquely identify a user. Use the user_identifier property instead. + display_name: The display name for the user. + job_title: The user's job title. """ def __init__( @@ -64,9 +64,9 @@ def _user_identifier_to_item(self) -> dict[str, UserProfile]: def get(self, user_identifier: str) -> UserProfile | None: # type: ignore [override] """Get an item from this list by user_identifier. Args: - user_identifier (str): The user_identifier of the item to get. + user_identifier: The user_identifier of the item to get. Returns: - UserProfile | None: The requested item or None if not found. + The requested item or None if not found. """ return self._user_identifier_to_item.get(user_identifier) diff --git a/cognite/client/data_classes/workflows.py b/cognite/client/data_classes/workflows.py index 5d28e92be7..73cb5842b2 100644 --- a/cognite/client/data_classes/workflows.py +++ b/cognite/client/data_classes/workflows.py @@ -58,15 +58,15 @@ class WorkflowUpsert(WorkflowCore): This class represents a workflow. This is the write version, used when creating or updating a workflow. 
Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - description (str | None): Description of the workflow. Note that when updating a workflow, the description will + external_id: The external ID provided by the client. Must be unique for the resource type. + description: Description of the workflow. Note that when updating a workflow, the description will always be overwritten also if it is set to None. Meaning if the workflow already has a description, and you want to keep it, you need to provide the description when updating the workflow. - data_set_id (int | None): The id of the data set this workflow belongs to. + data_set_id: The id of the data set this workflow belongs to. If a dataSetId is provided, any operations on this workflow, or its versions, executions, and triggers will require appropriate access to the data set. More information on data sets and their configuration can be found here: https://docs.cognite.com/cdf/data_governance/concepts/datasets/ - max_concurrent_executions (int | None): Maximum concurrent executions for this workflow. Defaults to None, which means the workflow will use the project limit. + max_concurrent_executions: Maximum concurrent executions for this workflow. Defaults to None, which means the workflow will use the project limit. """ @classmethod @@ -88,12 +88,12 @@ class Workflow(WorkflowCore): This class represents a workflow. This is the read version, used when reading or listing workflows. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - created_time (int): The time when the workflow was created. Unix timestamp in milliseconds. - last_updated_time (int): The time when the workflow was last updated. Unix timestamp in milliseconds. - description (str | None): Description of the workflow. Defaults to None. - data_set_id (int | None): The id of the data set this workflow belongs to. 
- max_concurrent_executions (int | None): Maximum concurrent executions for this workflow. Defaults to None, which means the workflow will use the project limit. + external_id: The external ID provided by the client. Must be unique for the resource type. + created_time: The time when the workflow was created. Unix timestamp in milliseconds. + last_updated_time: The time when the workflow was last updated. Unix timestamp in milliseconds. + description: Description of the workflow. Defaults to None. + data_set_id: The id of the data set this workflow belongs to. + max_concurrent_executions: Maximum concurrent executions for this workflow. Defaults to None, which means the workflow will use the project limit. """ def __init__( @@ -182,9 +182,9 @@ class FunctionTaskParameters(WorkflowTaskParameters): """The function parameters are used to specify the Cognite Function to be called. Args: - external_id (str): The external ID of the function to be called. - data (dict | str | None): The data to be passed to the function. Defaults to None. The data can be used to specify the input to the function from previous tasks or the workflow input. See the tip below for more information. - is_async_complete (bool | None): Whether the function is asynchronous. Defaults to None, which the API will interpret as False. + external_id: The external ID of the function to be called. + data: The data to be passed to the function. Defaults to None. The data can be used to specify the input to the function from previous tasks or the workflow input. See the tip below for more information. + is_async_complete: Whether the function is asynchronous. Defaults to None, which the API will interpret as False. If a function is asynchronous, you need to call the client.workflows.tasks.update() endpoint to update the status of the task. While synchronous tasks update the status automatically. 
@@ -263,9 +263,9 @@ class SimulationTaskParameters(WorkflowTaskParameters): The simulation parameters are used to specify the simulation routine to be executed. Args: - routine_external_id (str): The external ID of the simulation routine to be executed. - run_time (int | None): Reference timestamp used for data pre-processing and data sampling. - inputs (list[SimulationInputOverride] | None): List of input overrides. + routine_external_id: The external ID of the simulation routine to be executed. + run_time: Reference timestamp used for data pre-processing and data sampling. + inputs: List of input overrides. """ task_type = "simulation" @@ -312,9 +312,9 @@ class TransformationTaskParameters(WorkflowTaskParameters): The transformation parameters are used to specify the transformation to be called. Args: - external_id (str): The external ID of the transformation to be called. - concurrency_policy (Literal['fail', 'restartAfterCurrent', 'waitForCurrent']): Determines the behavior of the task if the Transformation is already running. ``fail``: The task fails if another instance of the Transformation is currently running. ``waitForCurrent``: The task will pause and wait for the already running Transformation to complete. Once completed, the task is completed. This mode is useful for preventing redundant Transformation runs. ``restartAfterCurrent``: The task waits for the ongoing Transformation to finish. After completion, the task restarts the Transformation. This mode ensures that the most recent data can be used by following tasks. - use_transformation_credentials (bool): If set to `true`, the transformation will be run using the client credentials configured on the transformation. If set to `false`, the transformation will be run using the client credentials used to trigger the workflow. + external_id: The external ID of the transformation to be called. + concurrency_policy: Determines the behavior of the task if the Transformation is already running. 
``fail``: The task fails if another instance of the Transformation is currently running. ``waitForCurrent``: The task will pause and wait for the already running Transformation to complete. Once completed, the task is completed. This mode is useful for preventing redundant Transformation runs. ``restartAfterCurrent``: The task waits for the ongoing Transformation to finish. After completion, the task restarts the Transformation. This mode ensures that the most recent data can be used by following tasks. + use_transformation_credentials: If set to `true`, the transformation will be run using the client credentials configured on the transformation. If set to `false`, the transformation will be run using the client credentials used to trigger the workflow. """ task_type = "transformation" @@ -354,11 +354,11 @@ class CDFTaskParameters(WorkflowTaskParameters): The CDF request parameters are used to specify a request to the Cognite Data Fusion API. Args: - resource_path (str): The resource path of the request. Note the path of the request which is prefixed by '{cluster}.cognitedata.com/api/v1/project/{project}' based on the cluster and project of the request. - method (Literal['GET', 'POST', 'PUT', 'DELETE'] | str): The HTTP method of the request. - query_parameters (dict | str | None): The query parameters of the request. Defaults to None. - body (dict | str | None): The body of the request. Defaults to None. Limited to 1024KiB in size - request_timeout_in_millis (int | str): The timeout of the request in milliseconds. Defaults to 10000. + resource_path: The resource path of the request. Note the path of the request which is prefixed by '{cluster}.cognitedata.com/api/v1/project/{project}' based on the cluster and project of the request. + method: The HTTP method of the request. + query_parameters: The query parameters of the request. Defaults to None. + body: The body of the request. Defaults to None. 
Limited to 1024KiB in size + request_timeout_in_millis: The timeout of the request in milliseconds. Defaults to 10000. Examples: @@ -420,7 +420,7 @@ class SubworkflowTaskParameters(WorkflowTaskParameters): dynamic task). Args: - tasks (list[WorkflowTask]): The tasks belonging to the subworkflow. + tasks: The tasks belonging to the subworkflow. """ task_type = "subworkflow" @@ -447,8 +447,8 @@ class SubworkflowReferenceParameters(WorkflowTaskParameters): The subworkflow reference is used to specifying a reference to another workflow which will be embedded into the execution at start time. Args: - workflow_external_id (str): The external ID of the referenced workflow. - version (str): The version of the referenced workflow. + workflow_external_id: The external ID of the referenced workflow. + version: The version of the referenced workflow. """ task_type = "subworkflow" @@ -490,9 +490,7 @@ class DynamicTaskParameters(WorkflowTaskParameters): - `${.input.someKey}`: A specific key within the input of the task with the given external id. Args: - tasks (list[WorkflowTask] | str): The tasks to be dynamically executed. The dynamic task is a string that is evaluated - during the workflow's execution. When calling Version Upsert, the tasks parameter must be a Reference string. - When calling Execution details, the tasks parameter will be a list of WorkflowTask objects. + tasks: The tasks to be dynamically executed. The dynamic task is a string that is evaluated during the workflow's execution. When calling Version Upsert, the tasks parameter must be a Reference string. When calling Execution details, the tasks parameter will be a list of WorkflowTask objects. """ task_type = "dynamic" @@ -527,14 +525,14 @@ class WorkflowTask(CogniteResource): Tasks do not distinguish between write and read versions. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - parameters (WorkflowTaskParameters): The parameters of the task. 
- name (str | None): The name of the task. Defaults to None. - description (str | None): The description of the task. Defaults to None. - retries (int): The number of retries for the task. Defaults to 3. - timeout (int): The timeout of the task in seconds. Defaults to 3600. - on_failure (Literal['abortWorkflow', 'skipTask']): The policy to handle failures and timeouts. Defaults to *abortWorkflow*. ``skipTask``: For both failures and timeouts, the task will retry until the retries are exhausted. After that, the Task is marked as COMPLETED_WITH_ERRORS and the subsequent tasks are executed. ``abortWorkflow``: In case of failures, retries will be performed until exhausted. After which the task is marked as FAILED and the Workflow is marked the same. In the event of a timeout, no retries are undertaken; the task is marked as TIMED_OUT and the Workflow is marked as FAILED. - depends_on (list[str] | None): The external ids of the tasks that this task depends on. Defaults to None. + external_id: The external ID provided by the client. Must be unique for the resource type. + parameters: The parameters of the task. + name: The name of the task. Defaults to None. + description: The description of the task. Defaults to None. + retries: The number of retries for the task. Defaults to 3. + timeout: The timeout of the task in seconds. Defaults to 3600. + on_failure: The policy to handle failures and timeouts. Defaults to *abortWorkflow*. ``skipTask``: For both failures and timeouts, the task will retry until the retries are exhausted. After that, the Task is marked as COMPLETED_WITH_ERRORS and the subsequent tasks are executed. ``abortWorkflow``: In case of failures, retries will be performed until exhausted. After which the task is marked as FAILED and the Workflow is marked the same. In the event of a timeout, no retries are undertaken; the task is marked as TIMED_OUT and the Workflow is marked as FAILED. + depends_on: The external ids of the tasks that this task depends on. 
Defaults to None. """ def __init__( @@ -634,9 +632,9 @@ class FunctionTaskOutput(WorkflowTaskOutput): The class represent the output of Cognite Function task. Args: - call_id (int | None): The call_id of the CDF Function call. - function_id (int | None): The function_id of the CDF Function. - response (dict | None): The response of the CDF Function call. + call_id: The call_id of the CDF Function call. + function_id: The function_id of the CDF Function. + response: The response of the CDF Function call. """ @@ -665,9 +663,9 @@ class SimulationTaskOutput(WorkflowTaskOutput): The class represent the output of Simulation execution. Args: - run_id (int | None): The run ID of the simulation run. - log_id (int | None): The log ID of the simulation run. - status_message (str | None): Status message of the simulation execution. + run_id: The run ID of the simulation run. + log_id: The log ID of the simulation run. + status_message: Status message of the simulation execution. """ task_type: ClassVar[str] = "simulation" @@ -707,7 +705,7 @@ class TransformationTaskOutput(WorkflowTaskOutput): The transformation output is used to specify the output of a transformation task. Args: - job_id (int | None): The job id of the transformation job. + job_id: The job id of the transformation job. """ task_type: ClassVar[str] = "transformation" @@ -729,8 +727,8 @@ class CDFTaskOutput(WorkflowTaskOutput): The CDF Request output is used to specify the output of a CDF Request. Args: - response (str | dict | None): The response of the CDF Request. Will be a JSON object if content-type is application/json, otherwise will be a string. - status_code (int | None): The status code of the CDF Request. + response: The response of the CDF Request. Will be a JSON object if content-type is application/json, otherwise will be a string. + status_code: The status code of the CDF Request. 
""" task_type: ClassVar[str] = "cdf" @@ -790,15 +788,15 @@ class WorkflowTaskExecution(CogniteResource): This class represents a task execution. Args: - id (str): The server generated id of the task execution. - external_id (str): The external ID provided by the client. Must be unique for the resource type. - status (TaskStatus): The status of the task execution. - input (WorkflowTaskParameters): The input parameters of the task execution. - output (WorkflowTaskOutput): The output of the task execution. - version (str | None): The version of the task execution. Defaults to None. - start_time (int | None): The start time of the task execution. Unix timestamp in milliseconds. Defaults to None. - end_time (int | None): The end time of the task execution. Unix timestamp in milliseconds. Defaults to None. - reason_for_incompletion (str | None): Provides the reason if the workflow did not complete successfully. Defaults to None. + id: The server generated id of the task execution. + external_id: The external ID provided by the client. Must be unique for the resource type. + status: The status of the task execution. + input: The input parameters of the task execution. + output: The output of the task execution. + version: The version of the task execution. Defaults to None. + start_time: The start time of the task execution. Unix timestamp in milliseconds. Defaults to None. + end_time: The end time of the task execution. Unix timestamp in milliseconds. Defaults to None. + reason_for_incompletion: Provides the reason if the workflow did not complete successfully. Defaults to None. """ def __init__( @@ -862,11 +860,8 @@ class WorkflowDefinitionCore(WriteableCogniteResource["WorkflowDefinitionUpsert" A workflow definition defines the tasks and order/dependencies of these tasks. Args: - tasks (list[WorkflowTask]): The tasks of the workflow definition. - description (str | None): The description of the workflow definition. 
Note that when updating a workflow definition - description, it will always be overwritten also if it is set to None. Meaning if the - workflow definition already has a description, and you want to keep it, you need to provide - the description when updating it. + tasks: The tasks of the workflow definition. + description: The description of the workflow definition. Note that when updating a workflow definition description, it will always be overwritten also if it is set to None. Meaning if the workflow definition already has a description, and you want to keep it, you need to provide the description when updating it. """ def __init__( @@ -898,11 +893,8 @@ class WorkflowDefinitionUpsert(WorkflowDefinitionCore): A workflow definition defines the tasks and order/dependencies of these tasks. Args: - tasks (list[WorkflowTask]): The tasks of the workflow definition. - description (str | None): The description of the workflow definition. Note that when updating a workflow definition - description, it will always be overwritten also if it is set to None. Meaning if the - workflow definition already has a description, and you want to keep it, you need to provide - the description when updating it. + tasks: The tasks of the workflow definition. + description: The description of the workflow definition. Note that when updating a workflow definition description, it will always be overwritten also if it is set to None. Meaning if the workflow definition already has a description, and you want to keep it, you need to provide the description when updating it. """ def __init__(self, tasks: list[WorkflowTask], description: str | None = None) -> None: @@ -933,9 +925,9 @@ class WorkflowDefinition(WorkflowDefinitionCore): A workflow definition defines the tasks and order/dependencies of these tasks. Args: - hash_ (str): The hash of the tasks and description. This is used to uniquely identify the workflow definition as you can overwrite a workflow version. 
- tasks (list[WorkflowTask]): The tasks of the workflow definition. - description (str | None): The description of the workflow definition. Defaults to None. + hash_: The hash of the tasks and description. This is used to uniquely identify the workflow definition as you can overwrite a workflow version. + tasks: The tasks of the workflow definition. + description: The description of the workflow definition. Defaults to None. """ def __init__( @@ -973,8 +965,8 @@ class WorkflowVersionCore(WriteableCogniteResource["WorkflowVersionUpsert"], ABC This class represents a workflow version. Args: - workflow_external_id (str): The external ID of the workflow. - version (str): The version of the workflow. + workflow_external_id: The external ID of the workflow. + version: The version of the workflow. """ def __init__( @@ -997,9 +989,9 @@ class WorkflowVersionUpsert(WorkflowVersionCore): This class represents a workflow version. This is the write-variant, used when creating or updating a workflow variant. Args: - workflow_external_id (str): The external ID of the workflow. - version (str): The version of the workflow. - workflow_definition (WorkflowDefinitionUpsert): The workflow definition of the workflow version. + workflow_external_id: The external ID of the workflow. + version: The version of the workflow. + workflow_definition: The workflow definition of the workflow version. """ @@ -1035,11 +1027,11 @@ class WorkflowVersion(WorkflowVersionCore): This class represents a workflow version. This is the read variant, used when retrieving/listing a workflow variant. Args: - workflow_external_id (str): The external ID of the workflow. - version (str): The version of the workflow. - workflow_definition (WorkflowDefinition): The workflow definition of the workflow version. - created_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. 
- last_updated_time (int): The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + workflow_external_id: The external ID of the workflow. + version: The version of the workflow. + workflow_definition: The workflow definition of the workflow version. + created_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. + last_updated_time: The number of milliseconds since 00:00:00 Thursday, 1 January 1970, Coordinated Universal Time (UTC), minus leap seconds. """ def __init__( @@ -1114,15 +1106,15 @@ class WorkflowExecution(CogniteResource): This class represents a workflow execution. Args: - id (str): The server generated id of the workflow execution. - workflow_external_id (str): The external ID of the workflow. - status (WorkflowStatus): The status of the workflow execution. - created_time (int): The time when the workflow execution was created. Unix timestamp in milliseconds. - version (str | None): The version of the workflow. Defaults to None. - start_time (int | None): The start time of the workflow execution. Unix timestamp in milliseconds. Defaults to None. - end_time (int | None): The end time of the workflow execution. Unix timestamp in milliseconds. Defaults to None. - reason_for_incompletion (str | None): Provides the reason if the workflow did not complete successfully. Defaults to None. - metadata (dict | None): Application specific metadata. + id: The server generated id of the workflow execution. + workflow_external_id: The external ID of the workflow. + status: The status of the workflow execution. + created_time: The time when the workflow execution was created. Unix timestamp in milliseconds. + version: The version of the workflow. Defaults to None. + start_time: The start time of the workflow execution. Unix timestamp in milliseconds. Defaults to None. + end_time: The end time of the workflow execution. 
Unix timestamp in milliseconds. Defaults to None. + reason_for_incompletion: Provides the reason if the workflow did not complete successfully. Defaults to None. + metadata: Application specific metadata. """ def __init__( @@ -1181,18 +1173,18 @@ class WorkflowExecutionDetailed(WorkflowExecution): it contains the workflow definition of the workflow. Args: - id (str): The server generated id of the workflow execution. - workflow_external_id (str): The external ID of the workflow. - workflow_definition (WorkflowDefinition): The workflow definition of the workflow. - status (WorkflowStatus): The status of the workflow execution. - executed_tasks (list[WorkflowTaskExecution]): The executed tasks of the workflow execution. - created_time (int): The time when the workflow execution was created. Unix timestamp in milliseconds. - version (str | None): The version of the workflow. Defaults to None. - start_time (int | None): The start time of the workflow execution. Unix timestamp in milliseconds. Defaults to None. - end_time (int | None): The end time of the workflow execution. Unix timestamp in milliseconds. Defaults to None. - reason_for_incompletion (str | None): Provides the reason if the workflow did not complete successfully. Defaults to None. - input (dict | None): Input arguments the workflow was triggered with. - metadata (dict | None): Metadata set when the workflow was triggered. + id: The server generated id of the workflow execution. + workflow_external_id: The external ID of the workflow. + workflow_definition: The workflow definition of the workflow. + status: The status of the workflow execution. + executed_tasks: The executed tasks of the workflow execution. + created_time: The time when the workflow execution was created. Unix timestamp in milliseconds. + version: The version of the workflow. Defaults to None. + start_time: The start time of the workflow execution. Unix timestamp in milliseconds. Defaults to None. 
+ end_time: The end time of the workflow execution. Unix timestamp in milliseconds. Defaults to None. + reason_for_incompletion: Provides the reason if the workflow did not complete successfully. Defaults to None. + input: Input arguments the workflow was triggered with. + metadata: Metadata set when the workflow was triggered. """ def __init__( @@ -1269,8 +1261,8 @@ class WorkflowVersionId: This class represents a Workflow Version Identifier. Args: - workflow_external_id (str): The external ID of the workflow. - version (str, optional): The version of the workflow. Defaults to None. + workflow_external_id: The external ID of the workflow. + version: The version of the workflow. Defaults to None. """ workflow_external_id: str @@ -1377,8 +1369,8 @@ class WorkflowTriggerDataModelingQuery(Query): r"""This class represents a data modeling trigger query. Args: - with_ (dict[str, ResultSetExpression]): A dictionary of result set expressions to use in the query. The keys are used to reference the result set expressions in the select. - select (dict[str, Select]): A dictionary of select expressions to use in the query. The keys must match the keys in the with\_ dictionary. The select expressions define which properties to include in the result set. + with_: A dictionary of result set expressions to use in the query. The keys are used to reference the result set expressions in the select. + select: A dictionary of select expressions to use in the query. The keys must match the keys in the with\_ dictionary. The select expressions define which properties to include in the result set. """ def __init__( @@ -1404,9 +1396,8 @@ class WorkflowScheduledTriggerRule(WorkflowTriggerRule): This class represents a scheduled trigger rule. Args: - cron_expression (str): The cron specification for the scheduled trigger. - timezone (ZoneInfo | None): The timezone in which the scheduled trigger should be evaluated. 
- If not provided, UTC will be used as the default timezone on the server side. + cron_expression: The cron specification for the scheduled trigger. + timezone: The timezone in which the scheduled trigger should be evaluated. If not provided, UTC will be used as the default timezone on the server side. """ _trigger_type = "schedule" @@ -1436,9 +1427,9 @@ class WorkflowDataModelingTriggerRule(WorkflowTriggerRule): This class represents a data modeling trigger rule. Args: - data_modeling_query (WorkflowTriggerDataModelingQuery): The data modeling query of the trigger. - batch_size (int | None): The batch size of the trigger. - batch_timeout (int | None): The batch timeout of the trigger. + data_modeling_query: The data modeling query of the trigger. + batch_size: The batch size of the trigger. + batch_timeout: The batch timeout of the trigger. """ _trigger_type = "dataModeling" @@ -1484,10 +1475,10 @@ class WorkflowTriggerCore(WriteableCogniteResource["WorkflowTriggerUpsert"], ABC This class represents a base class for a workflow trigger. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - trigger_rule (WorkflowTriggerRule): The trigger rule of the workflow version trigger. - workflow_external_id (str): The external ID of the workflow. - workflow_version (str): The version of the workflow. + external_id: The external ID provided by the client. Must be unique for the resource type. + trigger_rule: The trigger rule of the workflow version trigger. + workflow_external_id: The external ID of the workflow. + workflow_version: The version of the workflow. """ def __init__( @@ -1504,12 +1495,12 @@ class WorkflowTriggerUpsert(WorkflowTriggerCore): This class represents a workflow trigger for upsertion. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - trigger_rule (WorkflowTriggerRule): The trigger rule of the workflow version trigger. 
- workflow_external_id (str): The external ID of the workflow. - workflow_version (str): The version of the workflow. - input (dict | None): The input data of the workflow version trigger. Defaults to None. - metadata (dict | None): Application specific metadata. Defaults to None. + external_id: The external ID provided by the client. Must be unique for the resource type. + trigger_rule: The trigger rule of the workflow version trigger. + workflow_external_id: The external ID of the workflow. + workflow_version: The version of the workflow. + input: The input data of the workflow version trigger. Defaults to None. + metadata: Application specific metadata. Defaults to None. """ def __init__( @@ -1567,15 +1558,15 @@ class WorkflowTrigger(WorkflowTriggerCore): This class represents a workflow trigger. Args: - external_id (str): The external ID provided by the client. Must be unique for the resource type. - trigger_rule (WorkflowTriggerRule): The trigger rule of the workflow version trigger. - workflow_external_id (str): The external ID of the workflow. - workflow_version (str): The version of the workflow. - is_paused (bool): Whether the trigger is paused. - input (dict | None): The input data passed to the workflow when an execution is started. - metadata (dict | None): Application specific metadata. - created_time (int): The time when the workflow version trigger was created. Unix timestamp in milliseconds. - last_updated_time (int): The time when the workflow version trigger was last updated. Unix timestamp in milliseconds. + external_id: The external ID provided by the client. Must be unique for the resource type. + trigger_rule: The trigger rule of the workflow version trigger. + workflow_external_id: The external ID of the workflow. + workflow_version: The version of the workflow. + is_paused: Whether the trigger is paused. + input: The input data passed to the workflow when an execution is started. + metadata: Application specific metadata. 
+ created_time: The time when the workflow version trigger was created. Unix timestamp in milliseconds. + last_updated_time: The time when the workflow version trigger was last updated. Unix timestamp in milliseconds. """ def __init__( diff --git a/cognite/client/exceptions.py b/cognite/client/exceptions.py index bd9978e193..efa06bcefd 100644 --- a/cognite/client/exceptions.py +++ b/cognite/client/exceptions.py @@ -198,18 +198,18 @@ class CogniteAPIError(CogniteMultiException): failed to be processed (4xx). Args: - message (str): The error message produced by the API. - code (int): The error code produced by the failure. - x_request_id (str | None): The request-id generated for the failed request. - missing (list | None): List of missing identifiers. - duplicated (list | None): List of duplicated identifiers. - successful (list | None): List of items which were successfully processed. - failed (list | None): List of items which failed. - unknown (list | None): List of items which may or may not have been successfully processed. - skipped (list | None): List of items that were skipped due to "fail fast" mode. - cluster (str | None): Which Cognite cluster the user's project is on. - project (str | None): No description. - extra (dict | None): A dict of any additional information. + message: The error message produced by the API. + code: The error code produced by the failure. + x_request_id: The request-id generated for the failed request. + missing: List of missing identifiers. + duplicated: List of duplicated identifiers. + successful: List of items which were successfully processed. + failed: List of items which failed. + unknown: List of items which may or may not have been successfully processed. + skipped: List of items that were skipped due to "fail fast" mode. + cluster: Which Cognite cluster the user's project is on. + project: No description. + extra: A dict of any additional information. 
""" def __init__( @@ -301,8 +301,8 @@ class CogniteImportError(CogniteException, ImportError): Raised if the user attempts to use functionality which requires an uninstalled package. Args: - module (str): Name of the module which could not be imported - message (str | None): The error message to output. + module: Name of the module which could not be imported + message: The error message to output. """ def __init__(self, module: str, message: str | None = None) -> None: @@ -319,7 +319,7 @@ class CogniteMissingClientError(CogniteException): Raised if the user attempts to make use of a method which requires the cognite_client being set, but it is not. Args: - obj (Any): Object missing client reference. + obj: Object missing client reference. """ def __init__(self, obj: Any) -> None: @@ -350,9 +350,8 @@ class CogniteAssetHierarchyError(CogniteException): Raised if the given assets form an invalid hierarchy (by CDF standards). Args: - message (str): The error message to output. - hierarchy (AssetHierarchy): An instance of AssetHierarchy that holds various groups - of assets that failed validation. + message: The error message to output. + hierarchy: An instance of AssetHierarchy that holds various groups of assets that failed validation. """ def __init__(self, message: str, hierarchy: AssetHierarchy) -> None: diff --git a/cognite/client/testing.py b/cognite/client/testing.py index bc1d2e1ea3..f792d4888d 100644 --- a/cognite/client/testing.py +++ b/cognite/client/testing.py @@ -597,7 +597,7 @@ def monkeypatch_cognite_client() -> Iterator[CogniteClientMock]: Will patch all clients and replace them with specced MagicMock objects. Yields: - CogniteClientMock: The mock with which the CogniteClient has been replaced + The mock with which the CogniteClient has been replaced Examples: @@ -650,7 +650,7 @@ def monkeypatch_async_cognite_client() -> Iterator[AsyncCogniteClientMock]: Will patch all clients and replace them with specced MagicMock objects. 
Yields: - AsyncCogniteClientMock: The mock with which the AsyncCogniteClient has been replaced + The mock with which the AsyncCogniteClient has been replaced Examples: diff --git a/cognite/client/utils/_concurrency.py b/cognite/client/utils/_concurrency.py index 5290519b52..d6ba5ef0fd 100644 --- a/cognite/client/utils/_concurrency.py +++ b/cognite/client/utils/_concurrency.py @@ -28,11 +28,11 @@ class ConcurrencyConfig(ABC): Abstract base class for concurrency settings. Args: - concurrency_settings (ConcurrencySettings): Reference to the parent settings object, used to check if settings are frozen. - api_name (str): Which API these settings apply to (e.g. "data_modeling", "datapoints", etc.). - read (int): Maximum number of concurrent generic read requests. - write (int): Maximum number of concurrent generic write requests. - delete (int): Maximum number of concurrent generic delete requests. + concurrency_settings: Reference to the parent settings object, used to check if settings are frozen. + api_name: Which API these settings apply to (e.g. "data_modeling", "datapoints", etc.). + read: Maximum number of concurrent generic read requests. + write: Maximum number of concurrent generic write requests. + delete: Maximum number of concurrent generic delete requests. """ def __init__( @@ -90,11 +90,11 @@ class CRUDConcurrency(ConcurrencyConfig): Basic concurrency settings, only differentiating on CRUD operation types. Args: - concurrency_settings (ConcurrencySettings): Reference to the parent settings object, used to check if settings are frozen. - api_name (str): Which API these settings apply to (e.g. "data_modeling", "datapoints", etc.). - read (int): Maximum number of concurrent read requests (list, retrieve, search, etc.). - write (int): Maximum number of concurrent write requests (create, update, upsert, etc.). - delete (int): Maximum number of concurrent delete requests. 
+ concurrency_settings: Reference to the parent settings object, used to check if settings are frozen. + api_name: Which API these settings apply to (e.g. "data_modeling", "datapoints", etc.). + read: Maximum number of concurrent read requests (list, retrieve, search, etc.). + write: Maximum number of concurrent write requests (create, update, upsert, etc.). + delete: Maximum number of concurrent delete requests. """ @cache @@ -126,14 +126,13 @@ class DataModelingConcurrencyConfig(ConcurrencyConfig): Schema operations involve any call to views, data models and containers. Args: - concurrency_settings (ConcurrencySettings): Reference to the parent settings object, used to check if settings are frozen. - read (int): Maximum number of concurrent non-schema read requests. Mostly covers instance operations: query, retrieve, list, - sync and inspect, but also graphQL instance queries. - write (int): Maximum number of concurrent non-schema write requests, currently only instances -> apply. - delete (int): Maximum number of concurrent non-schema delete requests, currently only instances -> delete. - search (int): Maximum number of concurrent search and aggregation requests for instances. - read_schema (int): Maximum number of concurrent schema read requests (views, data models, containers and spaces), as well as calls to statistics. - write_schema (int): Maximum number of concurrent schema write requests (views, data models, containers and spaces). + concurrency_settings: Reference to the parent settings object, used to check if settings are frozen. + read: Maximum number of concurrent non-schema read requests. Mostly covers instance operations: query, retrieve, list, sync and inspect, but also graphQL instance queries. + write: Maximum number of concurrent non-schema write requests, currently only instances -> apply. + delete: Maximum number of concurrent non-schema delete requests, currently only instances -> delete. 
+ search: Maximum number of concurrent search and aggregation requests for instances. + read_schema: Maximum number of concurrent schema read requests (views, data models, containers and spaces), as well as calls to statistics. + write_schema: Maximum number of concurrent schema write requests (views, data models, containers and spaces). """ def __init__( diff --git a/cognite/client/utils/_pyodide_helpers.py b/cognite/client/utils/_pyodide_helpers.py index e49eefaafa..5871d1f2e2 100644 --- a/cognite/client/utils/_pyodide_helpers.py +++ b/cognite/client/utils/_pyodide_helpers.py @@ -53,7 +53,7 @@ class EnvVarToken(CredentialProvider): allowing refreshing the value by another entity. Args: - key (str): The name of the env.var. to read from. Default: 'COGNITE_TOKEN' + key: The name of the env.var. to read from. Default: 'COGNITE_TOKEN' Raises: KeyError: If the env.var. is not set. """ diff --git a/cognite/client/utils/_retry.py b/cognite/client/utils/_retry.py index 8a3205c935..ef37a6b187 100644 --- a/cognite/client/utils/_retry.py +++ b/cognite/client/utils/_retry.py @@ -9,9 +9,9 @@ class Backoff(Iterator[float]): described in this post: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ Args: - multiplier (float): No description. - max_wait (float): No description. - base (int): No description.""" + multiplier: No description. + max_wait: No description. + base: No description.""" def __init__(self, multiplier: float = 0.5, max_wait: float = 60.0, base: int = 2) -> None: self._multiplier = multiplier diff --git a/cognite/client/utils/_time.py b/cognite/client/utils/_time.py index 9ebe33e12d..6cb1323b92 100644 --- a/cognite/client/utils/_time.py +++ b/cognite/client/utils/_time.py @@ -106,10 +106,10 @@ def datetime_to_ms(dt: datetime) -> int: """Converts a datetime object to milliseconds since epoch. Args: - dt (datetime): Naive or aware datetime object. Naive datetimes are interpreted as local time. + dt: Naive or aware datetime object. 
Naive datetimes are interpreted as local time. Returns: - int: Milliseconds since epoch (negative for time prior to 1970-01-01) + Milliseconds since epoch (negative for time prior to 1970-01-01) """ try: return int(1000 * dt.timestamp()) @@ -125,13 +125,13 @@ def ms_to_datetime(ms: int | float) -> datetime: """Converts valid Cognite timestamps, i.e. milliseconds since epoch, to datetime object. Args: - ms (int | float): Milliseconds since epoch. + ms: Milliseconds since epoch. Raises: ValueError: On invalid Cognite timestamps. Returns: - datetime: Aware datetime object in UTC. + Aware datetime object in UTC. """ if not (MIN_TIMESTAMP_MS <= ms <= MAX_TIMESTAMP_MS): raise ValueError(f"Input {ms=} does not satisfy: {MIN_TIMESTAMP_MS} <= ms <= {MAX_TIMESTAMP_MS}") @@ -144,13 +144,13 @@ def datetime_to_ms_iso_timestamp(dt: datetime) -> str: """Converts a datetime object to a string representing a timestamp in the ISO-format expected by the Cognite Data Modeling APIs. Args: - dt (datetime): Naive or aware datetime object. Naive datetimes are interpreted as local time. + dt: Naive or aware datetime object. Naive datetimes are interpreted as local time. Raises: TypeError: If dt is not a datetime object Returns: - str: Timestamp string in ISO 8601 format with milliseconds + Timestamp string in ISO 8601 format with milliseconds """ if isinstance(dt, datetime): if dt.tzinfo is None: @@ -163,10 +163,10 @@ def convert_data_modeling_timestamp(timestamp: str) -> datetime: """Converts a timestamp string to a datetime object. Args: - timestamp (str): A timestamp string. + timestamp: A timestamp string. Returns: - datetime: A datetime object. + A datetime object. 
""" try: return datetime.fromisoformat(timestamp) @@ -232,10 +232,10 @@ def timestamp_to_ms(timestamp: int | float | str | datetime) -> int: """Returns the ms representation of some timestamp given by milliseconds, time-shift format or datetime object Args: - timestamp (int | float | str | datetime): Convert this timestamp to ms. + timestamp: Convert this timestamp to ms. Returns: - int: Milliseconds since epoch representation of timestamp + Milliseconds since epoch representation of timestamp Examples: diff --git a/cognite/client/utils/_uploading.py b/cognite/client/utils/_uploading.py index 62a29416ee..72c134b8da 100644 --- a/cognite/client/utils/_uploading.py +++ b/cognite/client/utils/_uploading.py @@ -48,7 +48,7 @@ class AsyncFileChunker(AsyncIterator[bytes]): file handles in a way that doesn't involve HTTP multipart encoding (as opposed to requests). Args: - file_handle (BinaryIO): An open file handle. + file_handle: An open file handle. """ CHUNK_SIZE = 64 * 1024 # 64 KiB chunks by default, copying httpx default diff --git a/docs/source/conf.py b/docs/source/conf.py index a56d3fe538..8d555a774b 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -28,7 +28,13 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
-extensions = ["sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.napoleon", "sphinx.ext.autosectionlabel"] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.napoleon", + "sphinx.ext.autosectionlabel", + "sphinx_autodoc_typehints", +] autosectionlabel_prefix_document = True @@ -179,3 +185,7 @@ def analyze_but_skip_overloads(self): ModuleAnalyzer.analyze = analyze_but_skip_overloads + +# Options for how to autodoc type hints +typehints_use_signature = True +always_use_bars_union = True diff --git a/poetry.lock b/poetry.lock index 27e7ba3eb9..4254307478 100644 --- a/poetry.lock +++ b/poetry.lock @@ -649,14 +649,14 @@ tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipyth [[package]] name = "filelock" -version = "3.24.2" +version = "3.24.3" description = "A platform independent file lock." optional = false python-versions = ">=3.10" groups = ["dev"] files = [ - {file = "filelock-3.24.2-py3-none-any.whl", hash = "sha256:667d7dc0b7d1e1064dd5f8f8e80bdac157a6482e8d2e02cd16fd3b6b33bd6556"}, - {file = "filelock-3.24.2.tar.gz", hash = "sha256:c22803117490f156e59fafce621f0550a7a853e2bbf4f87f112b11d469b6c81b"}, + {file = "filelock-3.24.3-py3-none-any.whl", hash = "sha256:426e9a4660391f7f8a810d71b0555bce9008b0a1cc342ab1f6947d37639e002d"}, + {file = "filelock-3.24.3.tar.gz", hash = "sha256:011a5644dc937c22699943ebbfc46e969cdde3e171470a6e40b9533e5a72affa"}, ] [[package]] @@ -1056,7 +1056,7 @@ description = "Low-level, pure Python DBus protocol wrapper." 
optional = false python-versions = ">=3.7" groups = ["dev"] -markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"linux\"" +markers = "sys_platform == \"linux\" and platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" files = [ {file = "jeepney-0.9.0-py3-none-any.whl", hash = "sha256:97e5714520c16fc0a45695e5365a2e11b81ea79bba796e26f9f1d178cb182683"}, {file = "jeepney-0.9.0.tar.gz", hash = "sha256:cf0e9e845622b81e4a28df94c40345400256ec608d0e55bb8a3feaa9163f5732"}, @@ -1724,7 +1724,7 @@ description = "Powerful data structures for data analysis, time series, and stat optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"pandas\" or extra == \"all\" or extra == \"geo\"" +markers = "python_version == \"3.10\" and (extra == \"pandas\" or extra == \"all\" or extra == \"geo\")" files = [ {file = "pandas-2.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:376c6446ae31770764215a6c937f72d917f214b43560603cd60da6408f183b6c"}, {file = "pandas-2.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e19d192383eab2f4ceb30b412b22ea30690c9e618f78870357ae1d682912015a"}, @@ -1784,11 +1784,7 @@ files = [ ] [package.dependencies] -numpy = [ - {version = ">=1.22.4", markers = "python_version < \"3.11\""}, - {version = ">=1.23.2", markers = "python_version == \"3.11\""}, - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, -] +numpy = {version = ">=1.22.4", markers = "python_version < \"3.11\""} python-dateutil = ">=2.8.2" pytz = ">=2020.1" tzdata = ">=2022.7" @@ -1818,6 +1814,99 @@ sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-d test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.9.2)"] +[[package]] +name = "pandas" +version = "3.0.1" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = true +python-versions = ">=3.11" +groups = ["main"] +markers = 
"python_version >= \"3.11\" and (extra == \"pandas\" or extra == \"all\" or extra == \"geo\")" +files = [ + {file = "pandas-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:de09668c1bf3b925c07e5762291602f0d789eca1b3a781f99c1c78f6cac0e7ea"}, + {file = "pandas-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:24ba315ba3d6e5806063ac6eb717504e499ce30bd8c236d8693a5fd3f084c796"}, + {file = "pandas-3.0.1-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:406ce835c55bac912f2a0dcfaf27c06d73c6b04a5dde45f1fd3169ce31337389"}, + {file = "pandas-3.0.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:830994d7e1f31dd7e790045235605ab61cff6c94defc774547e8b7fdfbff3dc7"}, + {file = "pandas-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a64ce8b0f2de1d2efd2ae40b0abe7f8ae6b29fbfb3812098ed5a6f8e235ad9bf"}, + {file = "pandas-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9832c2c69da24b602c32e0c7b1b508a03949c18ba08d4d9f1c1033426685b447"}, + {file = "pandas-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:84f0904a69e7365f79a0c77d3cdfccbfb05bf87847e3a51a41e1426b0edb9c79"}, + {file = "pandas-3.0.1-cp311-cp311-win_arm64.whl", hash = "sha256:4a68773d5a778afb31d12e34f7dd4612ab90de8c6fb1d8ffe5d4a03b955082a1"}, + {file = "pandas-3.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:476f84f8c20c9f5bc47252b66b4bb25e1a9fc2fa98cead96744d8116cb85771d"}, + {file = "pandas-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0ab749dfba921edf641d4036c4c21c0b3ea70fea478165cb98a998fb2a261955"}, + {file = "pandas-3.0.1-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8e36891080b87823aff3640c78649b91b8ff6eea3c0d70aeabd72ea43ab069b"}, + {file = "pandas-3.0.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:532527a701281b9dd371e2f582ed9094f4c12dd9ffb82c0c54ee28d8ac9520c4"}, + {file = "pandas-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:356e5c055ed9b0da1580d465657bc7d00635af4fd47f30afb23025352ba764d1"}, + {file = "pandas-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9d810036895f9ad6345b8f2a338dd6998a74e8483847403582cab67745bff821"}, + {file = "pandas-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:536232a5fe26dd989bd633e7a0c450705fdc86a207fec7254a55e9a22950fe43"}, + {file = "pandas-3.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:0f463ebfd8de7f326d38037c7363c6dacb857c5881ab8961fb387804d6daf2f7"}, + {file = "pandas-3.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5272627187b5d9c20e55d27caf5f2cd23e286aba25cadf73c8590e432e2b7262"}, + {file = "pandas-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:661e0f665932af88c7877f31da0dc743fe9c8f2524bdffe23d24fdcb67ef9d56"}, + {file = "pandas-3.0.1-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:75e6e292ff898679e47a2199172593d9f6107fd2dd3617c22c2946e97d5df46e"}, + {file = "pandas-3.0.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1ff8cf1d2896e34343197685f432450ec99a85ba8d90cce2030c5eee2ef98791"}, + {file = "pandas-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eca8b4510f6763f3d37359c2105df03a7a221a508f30e396a51d0713d462e68a"}, + {file = "pandas-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:06aff2ad6f0b94a17822cf8b83bbb563b090ed82ff4fe7712db2ce57cd50d9b8"}, + {file = "pandas-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fea306c783e28884c29057a1d9baa11a349bbf99538ec1da44c8476563d1b25"}, + {file = "pandas-3.0.1-cp313-cp313-win_arm64.whl", hash = "sha256:a8d37a43c52917427e897cb2e429f67a449327394396a81034a4449b99afda59"}, + {file = "pandas-3.0.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:d54855f04f8246ed7b6fc96b05d4871591143c46c0b6f4af874764ed0d2d6f06"}, + {file = "pandas-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4e1b677accee34a09e0dc2ce5624e4a58a1870ffe56fc021e9caf7f23cd7668f"}, + {file = 
"pandas-3.0.1-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a9cabbdcd03f1b6cd254d6dda8ae09b0252524be1592594c00b7895916cb1324"}, + {file = "pandas-3.0.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ae2ab1f166668b41e770650101e7090824fd34d17915dd9cd479f5c5e0065e9"}, + {file = "pandas-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6bf0603c2e30e2cafac32807b06435f28741135cb8697eae8b28c7d492fc7d76"}, + {file = "pandas-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6c426422973973cae1f4a23e51d4ae85974f44871b24844e4f7de752dd877098"}, + {file = "pandas-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b03f91ae8c10a85c1613102c7bef5229b5379f343030a3ccefeca8a33414cf35"}, + {file = "pandas-3.0.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:99d0f92ed92d3083d140bf6b97774f9f13863924cf3f52a70711f4e7588f9d0a"}, + {file = "pandas-3.0.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3b66857e983208654294bb6477b8a63dee26b37bdd0eb34d010556e91261784f"}, + {file = "pandas-3.0.1-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56cf59638bf24dc9bdf2154c81e248b3289f9a09a6d04e63608c159022352749"}, + {file = "pandas-3.0.1-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1a9f55e0f46951874b863d1f3906dcb57df2d9be5c5847ba4dfb55b2c815249"}, + {file = "pandas-3.0.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1849f0bba9c8a2fb0f691d492b834cc8dadf617e29015c66e989448d58d011ee"}, + {file = "pandas-3.0.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c3d288439e11b5325b02ae6e9cc83e6805a62c40c5a6220bea9beb899c073b1c"}, + {file = "pandas-3.0.1-cp314-cp314-win_amd64.whl", hash = "sha256:93325b0fe372d192965f4cca88d97667f49557398bbf94abdda3bf1b591dbe66"}, + {file = "pandas-3.0.1-cp314-cp314-win_arm64.whl", hash = "sha256:97ca08674e3287c7148f4858b01136f8bdfe7202ad25ad04fec602dd1d29d132"}, + {file = 
"pandas-3.0.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:58eeb1b2e0fb322befcf2bbc9ba0af41e616abadb3d3414a6bc7167f6cbfce32"}, + {file = "pandas-3.0.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:cd9af1276b5ca9e298bd79a26bda32fa9cc87ed095b2a9a60978d2ca058eaf87"}, + {file = "pandas-3.0.1-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:94f87a04984d6b63788327cd9f79dda62b7f9043909d2440ceccf709249ca988"}, + {file = "pandas-3.0.1-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85fe4c4df62e1e20f9db6ebfb88c844b092c22cd5324bdcf94bfa2fc1b391221"}, + {file = "pandas-3.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:331ca75a2f8672c365ae25c0b29e46f5ac0c6551fdace8eec4cd65e4fac271ff"}, + {file = "pandas-3.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:15860b1fdb1973fffade772fdb931ccf9b2f400a3f5665aef94a00445d7d8dd5"}, + {file = "pandas-3.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:44f1364411d5670efa692b146c748f4ed013df91ee91e9bec5677fb1fd58b937"}, + {file = "pandas-3.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:108dd1790337a494aa80e38def654ca3f0968cf4f362c85f44c15e471667102d"}, + {file = "pandas-3.0.1.tar.gz", hash = "sha256:4186a699674af418f655dbd420ed87f50d56b4cd6603784279d9eef6627823c8"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.26.0", markers = "python_version < \"3.14\""}, + {version = ">=2.3.3", markers = "python_version >= \"3.14\""}, +] +python-dateutil = ">=2.8.2" +tzdata = {version = "*", markers = "sys_platform == \"win32\" or sys_platform == \"emscripten\""} + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.36)", "adbc-driver-postgresql (>=1.2.0)", "adbc-driver-sqlite (>=1.2.0)", "beautifulsoup4 (>=4.12.3)", "bottleneck (>=1.4.2)", "fastparquet (>=2024.11.0)", "fsspec (>=2024.10.0)", "gcsfs (>=2024.10.0)", "html5lib (>=1.1)", "hypothesis (>=6.116.0)", "jinja2 (>=3.1.5)", "lxml (>=5.3.0)", "matplotlib (>=3.9.3)", "numba 
(>=0.60.0)", "numexpr (>=2.10.2)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.5)", "psycopg2 (>=2.9.10)", "pyarrow (>=13.0.0)", "pyiceberg (>=0.8.1)", "pymysql (>=1.1.1)", "pyreadstat (>=1.2.8)", "pytest (>=8.3.4)", "pytest-xdist (>=3.6.1)", "python-calamine (>=0.3.0)", "pytz (>=2024.2)", "pyxlsb (>=1.0.10)", "qtpy (>=2.4.2)", "s3fs (>=2024.10.0)", "scipy (>=1.14.1)", "tables (>=3.10.1)", "tabulate (>=0.9.0)", "xarray (>=2024.10.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.2.0)", "zstandard (>=0.23.0)"] +aws = ["s3fs (>=2024.10.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.4.2)"] +compression = ["zstandard (>=0.23.0)"] +computation = ["scipy (>=1.14.1)", "xarray (>=2024.10.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.5)", "python-calamine (>=0.3.0)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.2.0)"] +feather = ["pyarrow (>=13.0.0)"] +fss = ["fsspec (>=2024.10.0)"] +gcp = ["gcsfs (>=2024.10.0)"] +hdf5 = ["tables (>=3.10.1)"] +html = ["beautifulsoup4 (>=4.12.3)", "html5lib (>=1.1)", "lxml (>=5.3.0)"] +iceberg = ["pyiceberg (>=0.8.1)"] +mysql = ["SQLAlchemy (>=2.0.36)", "pymysql (>=1.1.1)"] +output-formatting = ["jinja2 (>=3.1.5)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=13.0.0)"] +performance = ["bottleneck (>=1.4.2)", "numba (>=0.60.0)", "numexpr (>=2.10.2)"] +plot = ["matplotlib (>=3.9.3)"] +postgresql = ["SQLAlchemy (>=2.0.36)", "adbc-driver-postgresql (>=1.2.0)", "psycopg2 (>=2.9.10)"] +pyarrow = ["pyarrow (>=13.0.0)"] +spss = ["pyreadstat (>=1.2.8)"] +sql-other = ["SQLAlchemy (>=2.0.36)", "adbc-driver-postgresql (>=1.2.0)", "adbc-driver-sqlite (>=1.2.0)"] +test = ["hypothesis (>=6.116.0)", "pytest (>=8.3.4)", "pytest-xdist (>=3.6.1)"] +timezone = ["pytz (>=2024.2)"] +xml = ["lxml (>=5.3.0)"] + [[package]] name = "parso" version = "0.8.6" @@ -2037,7 +2126,7 @@ description = "C parser in Python" optional = false python-versions = ">=3.10" groups = ["main", "dev"] -markers = "platform_python_implementation != \"PyPy\" and implementation_name != 
\"PyPy\"" +markers = "implementation_name != \"PyPy\" and platform_python_implementation != \"PyPy\"" files = [ {file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"}, {file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"}, @@ -2427,7 +2516,7 @@ description = "World timezone definitions, modern and historical" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"pandas\" or extra == \"all\" or extra == \"geo\"" +markers = "python_version == \"3.10\" and (extra == \"pandas\" or extra == \"all\" or extra == \"geo\")" files = [ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, @@ -2440,7 +2529,7 @@ description = "A (partial) reimplementation of pywin32 using ctypes/cffi" optional = false python-versions = ">=3.6" groups = ["dev"] -markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"win32\"" +markers = "sys_platform == \"win32\" and platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" files = [ {file = "pywin32-ctypes-0.2.3.tar.gz", hash = "sha256:d162dc04946d704503b2edc4d55f3dba5c1d539ead017afa00142c38b9885755"}, {file = "pywin32_ctypes-0.2.3-py3-none-any.whl", hash = "sha256:8a1513379d709975552d202d942d9837758905c8d01eb82b8bcc30918929e7b8"}, @@ -2670,7 +2759,7 @@ description = "Python bindings to FreeDesktop.org Secret Service API" optional = false python-versions = ">=3.10" groups = ["dev"] -markers = "platform_machine != \"ppc64le\" and platform_machine != \"s390x\" and sys_platform == \"linux\"" +markers = "sys_platform == \"linux\" and platform_machine != \"ppc64le\" and platform_machine != \"s390x\"" files = [ {file = "secretstorage-3.5.0-py3-none-any.whl", hash = 
"sha256:0ce65888c0725fcb2c5bc0fdb8e5438eece02c523557ea40ce0703c266248137"}, {file = "secretstorage-3.5.0.tar.gz", hash = "sha256:f04b8e4689cbce351744d5537bf6b1329c6fc68f91fa666f60a380edddcd11be"}, @@ -2881,6 +2970,66 @@ sphinxcontrib-jsmath = ">=1.0.1" sphinxcontrib-qthelp = ">=1.0.6" sphinxcontrib-serializinghtml = ">=1.1.9" +[[package]] +name = "sphinx-autodoc-typehints" +version = "3.0.1" +description = "Type hints (PEP 484) support for the Sphinx autodoc extension" +optional = false +python-versions = ">=3.10" +groups = ["docs"] +markers = "python_version == \"3.10\"" +files = [ + {file = "sphinx_autodoc_typehints-3.0.1-py3-none-any.whl", hash = "sha256:4b64b676a14b5b79cefb6628a6dc8070e320d4963e8ff640a2f3e9390ae9045a"}, + {file = "sphinx_autodoc_typehints-3.0.1.tar.gz", hash = "sha256:b9b40dd15dee54f6f810c924f863f9cf1c54f9f3265c495140ea01be7f44fa55"}, +] + +[package.dependencies] +sphinx = ">=8.1.3" + +[package.extras] +docs = ["furo (>=2024.8.6)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "defusedxml (>=0.7.1)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "sphobjinv (>=2.3.1.2)", "typing-extensions (>=4.12.2)"] + +[[package]] +name = "sphinx-autodoc-typehints" +version = "3.6.1" +description = "Type hints (PEP 484) support for the Sphinx autodoc extension" +optional = false +python-versions = ">=3.11" +groups = ["docs"] +markers = "python_version == \"3.11\"" +files = [ + {file = "sphinx_autodoc_typehints-3.6.1-py3-none-any.whl", hash = "sha256:dd818ba31d4c97f219a8c0fcacef280424f84a3589cedcb73003ad99c7da41ca"}, + {file = "sphinx_autodoc_typehints-3.6.1.tar.gz", hash = "sha256:fa0b686ae1b85965116c88260e5e4b82faec3687c2e94d6a10f9b36c3743e2fe"}, +] + +[package.dependencies] +sphinx = ">=9.0.4" + +[package.extras] +docs = ["furo (>=2025.9.25)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.13)", "defusedxml (>=0.7.1)", "diff-cover (>=9.7.2)", "pytest (>=9.0.2)", "pytest-cov (>=7)", "sphobjinv (>=2.3.1.3)", "typing-extensions 
(>=4.15)"] + +[[package]] +name = "sphinx-autodoc-typehints" +version = "3.6.3" +description = "Type hints (PEP 484) support for the Sphinx autodoc extension" +optional = false +python-versions = ">=3.12" +groups = ["docs"] +markers = "python_version >= \"3.12\"" +files = [ + {file = "sphinx_autodoc_typehints-3.6.3-py3-none-any.whl", hash = "sha256:46ebc68fa85b320d55887a8d836a01e12e3b7744da973e70af8cedc74072aad5"}, + {file = "sphinx_autodoc_typehints-3.6.3.tar.gz", hash = "sha256:6c387b47d9ad5e75b157810af5bad46901f0a22708ed5e4adf466885a9c60910"}, +] + +[package.dependencies] +sphinx = ">=9.1" + +[package.extras] +docs = ["furo (>=2025.12.19)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.13.4)", "defusedxml (>=0.7.1)", "diff-cover (>=10.2)", "pytest (>=9.0.2)", "pytest-cov (>=7)", "sphobjinv (>=2.3.1.3)", "typing-extensions (>=4.15)"] + [[package]] name = "sphinx-rtd-theme" version = "3.1.0" @@ -3304,7 +3453,7 @@ description = "Provider of IANA time zone data" optional = false python-versions = ">=2" groups = ["main"] -markers = "platform_system == \"Windows\" and extra == \"pyodide\" or extra == \"pandas\" or extra == \"all\" or extra == \"geo\"" +markers = "(sys_platform == \"win32\" or sys_platform == \"emscripten\" or extra == \"pyodide\" or python_version == \"3.10\") and (sys_platform == \"win32\" or sys_platform == \"emscripten\" or platform_system == \"Windows\" or python_version == \"3.10\") and (platform_system == \"Windows\" or extra == \"pandas\" or extra == \"all\" or extra == \"geo\") and (extra == \"pyodide\" or extra == \"pandas\" or extra == \"all\" or extra == \"geo\")" files = [ {file = "tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1"}, {file = "tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7"}, @@ -3330,14 +3479,14 @@ zstd = ["backports-zstd (>=1.0.0) ; python_version < \"3.14\""] [[package]] name = "virtualenv" 
-version = "20.37.0" +version = "20.38.0" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "virtualenv-20.37.0-py3-none-any.whl", hash = "sha256:5d3951c32d57232ae3569d4de4cc256c439e045135ebf43518131175d9be435d"}, - {file = "virtualenv-20.37.0.tar.gz", hash = "sha256:6f7e2064ed470aa7418874e70b6369d53b66bcd9e9fd5389763e96b6c94ccb7c"}, + {file = "virtualenv-20.38.0-py3-none-any.whl", hash = "sha256:d6e78e5889de3a4742df2d3d44e779366325a90cf356f15621fddace82431794"}, + {file = "virtualenv-20.38.0.tar.gz", hash = "sha256:94f39b1abaea5185bf7ea5a46702b56f1d0c9aa2f41a6c2b8b0af4ddc74c10a7"}, ] [package.dependencies] @@ -3396,4 +3545,4 @@ yaml = ["PyYAML"] [metadata] lock-version = "2.1" python-versions = "^3.10" -content-hash = "66f6fc71f1c40772a5069a2ada922ffc9cb2a40b2fa32723eb3110339a2c44f7" +content-hash = "22a504e10d6fbe968db746a4433f2da02f98200959ea9ad5fbc1c0c29f6cac3c" diff --git a/pyproject.toml b/pyproject.toml index 736f90b788..1f18b95d76 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -91,6 +91,7 @@ anyio = "^4.11.0" [tool.poetry.group.docs.dependencies] sphinx = ">=7.2" sphinx-rtd-theme = ">=1" +sphinx_autodoc_typehints = ">=3.0.1" [build-system] requires = ["poetry-core"] diff --git a/scripts/custom_checks/docstrings.py b/scripts/custom_checks/docstrings.py index 229e21e673..7353e4dc1c 100644 --- a/scripts/custom_checks/docstrings.py +++ b/scripts/custom_checks/docstrings.py @@ -273,7 +273,7 @@ def docstring_is_correct(self): # If the function returns None, we don't want a returns-section: return_annot_is_correct = self.return_parameter is None elif self.return_parameter is not None: - return_annot_is_correct = self.actual_return_annotation == self.return_parameter.annotation + return_annot_is_correct = not bool(self.return_parameter.annotation) parsed_annotations = dict((p.var_name, p.annotation) for p in self.parameters) parameters_are_correct = ( @@ -283,9 +283,10 @@ def 
docstring_is_correct(self): or ( list(self.actual_annotations.keys()) == list(parsed_annotations.keys()) # Do the annotations match? - and list(self.actual_annotations.values()) == list(parsed_annotations.values()) + and not any(list(parsed_annotations.values())) ) ) + return return_annot_is_correct and parameters_are_correct def _create_docstring_param_description(self): @@ -296,7 +297,7 @@ def _create_docstring_param_description(self): doc_descr = dict((p.var_name, p.description) for p in self.parameters) for var, annot in self.actual_annotations.items(): description = doc_descr.get(var, "No description.") - fixed_lines.append(f"{whitespace} {var} ({annot}): {description}") + fixed_lines.append(f"{whitespace} {var}: {description}") if self.add_space_after_args: fixed_lines.append("") return fixed_lines @@ -312,7 +313,7 @@ def _create_docstring_return_description(self): whitespace = " " * self.indentation fixed_lines = [ f"{whitespace}{self.RETURN_STRING}", - f"{whitespace} {self.actual_return_annotation}: {description}", + f"{whitespace} {description}", ] if self.add_space_after_returns: fixed_lines.append("")