|
12446 | 12446 | }, |
12447 | 12447 | "PlatformIdentifier":{ |
12448 | 12448 | "shape":"PlatformIdentifier", |
12449 | | - "documentation":"<p>The platform identifier of the notebook instance runtime environment.</p>" |
| 12449 | + "documentation":"<p>The platform identifier of the notebook instance runtime environment. The default value is <code>notebook-al2-v2</code>.</p>" |
12450 | 12450 | }, |
12451 | 12451 | "InstanceMetadataServiceConfiguration":{ |
12452 | 12452 | "shape":"InstanceMetadataServiceConfiguration", |
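The doc-only change above pins down the default platform. As a hedged illustration (resource names here are hypothetical), a `CreateNotebookInstance` call that omits `PlatformIdentifier` now documents a fallback of `notebook-al2-v2`; passing it explicitly with boto3 looks like this:

```python
import boto3

sm = boto3.client("sagemaker")

# Explicitly pin the runtime platform; per the updated documentation,
# omitting PlatformIdentifier yields the default "notebook-al2-v2".
sm.create_notebook_instance(
    NotebookInstanceName="example-notebook",               # hypothetical name
    InstanceType="ml.t3.medium",
    RoleArn="arn:aws:iam::123456789012:role/ExampleRole",  # hypothetical role
    PlatformIdentifier="notebook-al2-v2",
)
```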
|
21172 | 21172 | }, |
21173 | 21173 | "documentation":"<p>The configuration parameters that specify the IAM roles assumed by the execution role of SageMaker (assumable roles) and the cluster instances or job execution environments (execution roles or runtime roles) to manage and access resources required for running Amazon EMR clusters or Amazon EMR Serverless applications.</p>" |
21174 | 21174 | }, |
| 21175 | + "EnableCaching":{"type":"boolean"}, |
21175 | 21176 | "EnableCapture":{"type":"boolean"}, |
21176 | 21177 | "EnableInfraCheck":{ |
21177 | 21178 | "type":"boolean", |
|
24706 | 24707 | "box":true, |
24707 | 24708 | "min":0 |
24708 | 24709 | }, |
| 24710 | + "InferenceComponentDataCacheConfig":{ |
| 24711 | + "type":"structure", |
| 24712 | + "required":["EnableCaching"], |
| 24713 | + "members":{ |
| 24714 | + "EnableCaching":{ |
| 24715 | + "shape":"EnableCaching", |
| 24716 | + "documentation":"<p>Sets whether the endpoint that hosts the inference component caches the model artifacts and container image.</p> <p>With caching enabled, the endpoint caches this data in each instance that it provisions for the inference component. That way, the inference component deploys faster during the auto scaling process. If caching isn't enabled, the inference component takes longer to deploy because of the time it spends downloading the data.</p>", |
| 24717 | + "box":true |
| 24718 | + } |
| 24719 | + }, |
| 24720 | + "documentation":"<p>Settings that affect how the inference component caches data.</p>" |
| 24721 | + }, |
| 24722 | + "InferenceComponentDataCacheConfigSummary":{ |
| 24723 | + "type":"structure", |
| 24724 | + "required":["EnableCaching"], |
| 24725 | + "members":{ |
| 24726 | + "EnableCaching":{ |
| 24727 | + "shape":"EnableCaching", |
| 24728 | + "documentation":"<p>Indicates whether the inference component caches model artifacts as part of the auto scaling process.</p>", |
| 24729 | + "box":true |
| 24730 | + } |
| 24731 | + }, |
| 24732 | + "documentation":"<p>Settings that affect how the inference component caches data.</p>" |
| 24733 | + }, |
24709 | 24734 | "InferenceComponentDeploymentConfig":{ |
24710 | 24735 | "type":"structure", |
24711 | 24736 | "required":["RollingUpdatePolicy"], |
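The two new shapes are deliberately small: `InferenceComponentDataCacheConfig` (request side) and `InferenceComponentDataCacheConfigSummary` (describe side) each carry a single required `EnableCaching` member, and `"box":true` marks it as a nullable boxed boolean in generated SDKs rather than a primitive defaulting to false. A minimal sketch of the corresponding payloads as Python dicts (values are illustrative):

```python
# Request-side shape: caches model artifacts and the container image on
# each instance provisioned for the inference component, so copies added
# during auto scaling deploy faster.
data_cache_config = {"EnableCaching": True}

# Describe-side shape mirrors it one-for-one.
data_cache_config_summary = {"EnableCaching": True}
```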
|
24811 | 24836 | "BaseInferenceComponentName":{ |
24812 | 24837 | "shape":"InferenceComponentName", |
24813 | 24838 | "documentation":"<p>The name of an existing inference component that is to contain the inference component that you're creating with your request.</p> <p>Specify this parameter only if your request is meant to create an adapter inference component. An adapter inference component contains the path to an adapter model. The purpose of the adapter model is to tailor the inference output of a base foundation model, which is hosted by the base inference component. The adapter inference component uses the compute resources that you assigned to the base inference component.</p> <p>When you create an adapter inference component, use the <code>Container</code> parameter to specify the location of the adapter artifacts. In the parameter value, use the <code>ArtifactUrl</code> parameter of the <code>InferenceComponentContainerSpecification</code> data type.</p> <p>Before you can create an adapter inference component, you must have an existing inference component that contains the foundation model that you want to adapt.</p>" |
| 24839 | + }, |
| 24840 | + "DataCacheConfig":{ |
| 24841 | + "shape":"InferenceComponentDataCacheConfig", |
| 24842 | + "documentation":"<p>Settings that affect how the inference component caches data.</p>" |
24814 | 24843 | } |
24815 | 24844 | }, |
24816 | 24845 | "documentation":"<p>Details about the resources to deploy with this inference component, including the model, container, and compute resources.</p>" |
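Once SDKs are regenerated from this model, the new member should surface as `Specification.DataCacheConfig` on `CreateInferenceComponent`. A hedged boto3 sketch (endpoint, model, and component names are hypothetical, and `DataCacheConfig` is assumed to be available only in a boto3 release that includes this model):

```python
import boto3

sm = boto3.client("sagemaker")

sm.create_inference_component(
    InferenceComponentName="example-ic",       # hypothetical
    EndpointName="example-endpoint",           # hypothetical
    VariantName="AllTraffic",
    Specification={
        "ModelName": "example-model",          # hypothetical
        "ComputeResourceRequirements": {
            "NumberOfAcceleratorDevicesRequired": 1,
            "MinMemoryRequiredInMb": 1024,
        },
        # New in this model: cache the model artifacts and container
        # image on each provisioned instance so that scaled-out copies
        # skip the download step.
        "DataCacheConfig": {"EnableCaching": True},
    },
    RuntimeConfig={"CopyCount": 1},
)
```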
|
24837 | 24866 | "BaseInferenceComponentName":{ |
24838 | 24867 | "shape":"InferenceComponentName", |
24839 | 24868 | "documentation":"<p>The name of the base inference component that contains this inference component.</p>" |
| 24869 | + }, |
| 24870 | + "DataCacheConfig":{ |
| 24871 | + "shape":"InferenceComponentDataCacheConfigSummary", |
| 24872 | + "documentation":"<p>Settings that affect how the inference component caches data.</p>" |
24840 | 24873 | } |
24841 | 24874 | }, |
24842 | 24875 | "documentation":"<p>Details about the resources that are deployed with this inference component.</p>" |
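On the read path, the same setting should come back in the `DescribeInferenceComponent` response via the specification summary (again assuming an SDK built from this model):

```python
import boto3

sm = boto3.client("sagemaker")

resp = sm.describe_inference_component(
    InferenceComponentName="example-ic"  # hypothetical
)
# DataCacheConfig is assumed present once SDKs pick up this model;
# .get() keeps the sketch safe against responses from older endpoints.
cache_cfg = resp["Specification"].get("DataCacheConfig", {})
print(cache_cfg.get("EnableCaching"))
```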
|
36136 | 36169 | }, |
36137 | 36170 | "S3DataDistributionType":{ |
36138 | 36171 | "shape":"ProcessingS3DataDistributionType", |
36139 | | - "documentation":"<p>Whether to distribute the data from Amazon S3 to all processing instances with <code>FullyReplicated</code>, or whether the data from Amazon S3 is shared by Amazon S3 key, downloading one shard of data to each processing instance.</p>" |
| 36172 | + "documentation":"<p>Whether to distribute the data from Amazon S3 to all processing instances with <code>FullyReplicated</code>, or whether the data from Amazon S3 is sharded by Amazon S3 key, downloading one shard of data to each processing instance.</p>" |
36140 | 36173 | }, |
36141 | 36174 | "S3CompressionType":{ |
36142 | 36175 | "shape":"ProcessingS3CompressionType", |
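For context on the corrected wording ("shared" to "sharded"): `FullyReplicated` copies the full Amazon S3 dataset to every processing instance, while `ShardedByS3Key` splits objects across instances by S3 key so each instance downloads one shard. A minimal `S3Input` fragment for a processing job (URI and local path are hypothetical):

```python
# One shard of the S3 prefix per processing instance.
s3_input = {
    "S3Uri": "s3://example-bucket/processing/input/",  # hypothetical
    "LocalPath": "/opt/ml/processing/input",
    "S3DataType": "S3Prefix",
    "S3InputMode": "File",
    "S3DataDistributionType": "ShardedByS3Key",
}
```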