diff --git a/Makefile b/Makefile
index 155b7ea1..477f0ffe 100644
--- a/Makefile
+++ b/Makefile
@@ -5,6 +5,7 @@ INTEG_API_INVOKE := RestApiUrl HttpApiUrl
INTEG_EXTENSIONS := extension-fn extension-trait logs-trait
# Using musl to run extensions on both AL1 and AL2
INTEG_ARCH := x86_64-unknown-linux-musl
+RIE_MAX_CONCURRENCY ?= 4
define uppercase
$(shell sed -r 's/(^|-)(\w)/\U\2/g' <<< $(1))
@@ -111,4 +112,8 @@ fmt:
cargo +nightly fmt --all
test-rie:
- ./scripts/test-rie.sh $(EXAMPLE)
\ No newline at end of file
+ ./scripts/test-rie.sh $(EXAMPLE)
+
+# Run RIE in Lambda Managed Instance (LMI) mode with concurrent polling.
+test-rie-lmi:
+ RIE_MAX_CONCURRENCY=$(RIE_MAX_CONCURRENCY) ./scripts/test-rie.sh $(EXAMPLE)
diff --git a/examples/basic-lambda/src/main.rs b/examples/basic-lambda/src/main.rs
index d3f2a3cd..396c3afd 100644
--- a/examples/basic-lambda/src/main.rs
+++ b/examples/basic-lambda/src/main.rs
@@ -28,7 +28,10 @@ async fn main() -> Result<(), Error> {
tracing::init_default_subscriber();
let func = service_fn(my_handler);
- lambda_runtime::run(func).await?;
+ if let Err(err) = lambda_runtime::run(func).await {
+ eprintln!("run error: {:?}", err);
+ return Err(err);
+ }
Ok(())
}
diff --git a/lambda-http/src/lib.rs b/lambda-http/src/lib.rs
index 60e279c7..d82ff0d0 100644
--- a/lambda-http/src/lib.rs
+++ b/lambda-http/src/lib.rs
@@ -102,7 +102,7 @@ use std::{
};
mod streaming;
-pub use streaming::{run_with_streaming_response, StreamAdapter};
+pub use streaming::{run_with_streaming_response, run_with_streaming_response_concurrent, StreamAdapter};
/// Type alias for `http::Request`s with a fixed [`Body`](enum.Body.html) type
pub type Request = http::Request<Body>;
@@ -151,6 +151,18 @@ pub struct Adapter<'a, R, S> {
_phantom_data: PhantomData<&'a R>,
}
+impl<'a, R, S> Clone for Adapter<'a, R, S>
+where
+ S: Clone,
+{
+ fn clone(&self) -> Self {
+ Self {
+ service: self.service.clone(),
+ _phantom_data: PhantomData,
+ }
+ }
+}
+
impl<'a, R, S, E> From<S> for Adapter<'a, R, S>
where
S: Service<Request, Response = R, Error = E>,
@@ -203,6 +215,24 @@ where
lambda_runtime::run(Adapter::from(handler)).await
}
+/// Starts the Lambda Rust runtime in a mode that is compatible with
+/// Lambda Managed Instances (concurrent invocations).
+///
+/// When `AWS_LAMBDA_MAX_CONCURRENCY` is set to a value greater than 1, this
+/// will use a concurrent `/next` polling loop with a bounded number of
+/// in-flight handler tasks. When the environment variable is unset or `<= 1`,
+/// it falls back to the same sequential behavior as [`run`], so the same
+/// handler can run on both classic Lambda and Lambda Managed Instances.
+pub async fn run_concurrent<R, S, E>(handler: S) -> Result<(), Error>
+where
+ S: Service<Request, Response = R, Error = E> + Clone + Send + 'static,
+ S::Future: Send + 'static,
+ R: IntoResponse + Send + Sync + 'static,
+ E: std::fmt::Debug + Into<Diagnostic> + Send + 'static,
+{
+ lambda_runtime::run_concurrent(Adapter::from(handler)).await
+}
+
#[cfg(test)]
mod test_adapter {
use std::task::{Context, Poll};
diff --git a/lambda-http/src/streaming.rs b/lambda-http/src/streaming.rs
index ed61c773..a729206c 100644
--- a/lambda-http/src/streaming.rs
+++ b/lambda-http/src/streaming.rs
@@ -10,7 +10,7 @@ pub use http::{self, Response};
use http_body::Body;
use lambda_runtime::{
tower::{
- util::{MapRequest, MapResponse},
+ util::{BoxCloneService, MapRequest, MapResponse},
ServiceBuilder, ServiceExt,
},
Diagnostic,
@@ -93,14 +93,33 @@ where
B::Error: Into<Error> + Send + Debug,
{
ServiceBuilder::new()
- .map_request(|req: LambdaEvent<LambdaRequest>| {
- let event: Request = req.payload.into();
- event.with_lambda_context(req.context)
- })
+ .map_request(event_to_request as fn(LambdaEvent<LambdaRequest>) -> Request)
.service(handler)
.map_response(into_stream_response)
}
+/// Builds a streaming-aware Tower service from a `Service` that can be
+/// cloned and sent across tasks. This is used by the concurrent HTTP entrypoint.
+#[allow(clippy::type_complexity)]
+fn into_stream_service_boxed<S, B, E>(
+ handler: S,
+) -> BoxCloneService<LambdaEvent<LambdaRequest>, StreamResponse<BodyStream<B>>, E>
+where
+ S: Service<Request, Response = Response<B>, Error = E> + Clone + Send + 'static,
+ S::Future: Send + 'static,
+ E: Debug + Into<Error> + Send + 'static,
+ B: Body + Unpin + Send + 'static,
+ B::Data: Into<Bytes> + Send,
+ B::Error: Into<Error> + Send + Debug,
+{
+ let svc = ServiceBuilder::new()
+ .map_request(event_to_request as fn(LambdaEvent<LambdaRequest>) -> Request)
+ .service(handler)
+ .map_response(into_stream_response);
+
+ BoxCloneService::new(svc)
+}
+
/// Converts an `http::Response` into a streaming Lambda response.
fn into_stream_response<B>(res: Response<B>) -> StreamResponse<BodyStream<B>>
where
@@ -128,6 +147,11 @@ where
}
}
+fn event_to_request(req: LambdaEvent<LambdaRequest>) -> Request {
+ let event: Request = req.payload.into();
+ event.with_lambda_context(req.context)
+}
+
/// Runs the Lambda runtime with a handler that returns **streaming** HTTP
/// responses.
///
@@ -147,6 +171,24 @@ where
lambda_runtime::run(into_stream_service(handler)).await
}
+/// Runs the Lambda runtime with a handler that returns **streaming** HTTP
+/// responses, in a mode that is compatible with Lambda Managed Instances.
+///
+/// This uses a cloneable, boxed service internally so it can be driven by the
+/// concurrent runtime. When `AWS_LAMBDA_MAX_CONCURRENCY` is not set or `<= 1`,
+/// it falls back to the same sequential behavior as [`run_with_streaming_response`].
+pub async fn run_with_streaming_response_concurrent<S, B, E>(handler: S) -> Result<(), Error>
+where
+ S: Service<Request, Response = Response<B>, Error = E> + Clone + Send + 'static,
+ S::Future: Send + 'static,
+ E: Debug + Into<Error> + Send + 'static,
+ B: Body + Unpin + Send + 'static,
+ B::Data: Into<Bytes> + Send,
+ B::Error: Into<Error> + Send + Debug,
+{
+ lambda_runtime::run_concurrent(into_stream_service_boxed(handler)).await
+}
+
pin_project_lite::pin_project! {
#[non_exhaustive]
pub struct BodyStream {
diff --git a/lambda-runtime-api-client/src/lib.rs b/lambda-runtime-api-client/src/lib.rs
index 3df616ab..86cc715f 100644
--- a/lambda-runtime-api-client/src/lib.rs
+++ b/lambda-runtime-api-client/src/lib.rs
@@ -41,6 +41,7 @@ impl Client {
ClientBuilder {
connector: HttpConnector::new(),
uri: None,
+ pool_size: None,
}
}
}
@@ -59,11 +60,16 @@ impl Client {
self.client.request(req).map_err(Into::into).boxed()
}
- /// Create a new client with a given base URI and HTTP connector.
- fn with(base: Uri, connector: HttpConnector) -> Self {
- let client = hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new())
- .http1_max_buf_size(1024 * 1024)
- .build(connector);
+ /// Create a new client with a given base URI, HTTP connector, and optional pool size hint.
+ fn with(base: Uri, connector: HttpConnector, pool_size: Option<usize>) -> Self {
+ let mut builder = hyper_util::client::legacy::Client::builder(hyper_util::rt::TokioExecutor::new());
+ builder.http1_max_buf_size(1024 * 1024);
+
+ if let Some(size) = pool_size {
+ builder.pool_max_idle_per_host(size);
+ }
+
+ let client = builder.build(connector);
Self { base, client }
}
@@ -94,6 +100,7 @@ impl Client {
pub struct ClientBuilder {
connector: HttpConnector,
uri: Option<Uri>,
+ pool_size: Option<usize>,
}
impl ClientBuilder {
@@ -102,6 +109,7 @@ impl ClientBuilder {
ClientBuilder {
connector,
uri: self.uri,
+ pool_size: self.pool_size,
}
}
@@ -111,6 +119,14 @@ impl ClientBuilder {
Self { uri: Some(uri), ..self }
}
+ /// Provide a pool size hint for the underlying Hyper client.
+ pub fn with_pool_size(self, pool_size: usize) -> Self {
+ Self {
+ pool_size: Some(pool_size),
+ ..self
+ }
+ }
+
/// Create the new client to interact with the Runtime API.
pub fn build(self) -> Result<Client, Error> {
let uri = match self.uri {
@@ -120,7 +136,7 @@ impl ClientBuilder {
uri.try_into().expect("Unable to convert to URL")
}
};
- Ok(Client::with(uri, self.connector))
+ Ok(Client::with(uri, self.connector, self.pool_size))
}
}
@@ -182,4 +198,17 @@ mod tests {
&req.uri().to_string()
);
}
+
+ #[test]
+ fn builder_accepts_pool_size() {
+ let base = "http://localhost:9001";
+ let expected: Uri = base.parse().unwrap();
+ let client = Client::builder()
+ .with_pool_size(4)
+ .with_endpoint(base.parse().unwrap())
+ .build()
+ .unwrap();
+
+ assert_eq!(client.base, expected);
+ }
}
diff --git a/lambda-runtime/src/layers/api_client.rs b/lambda-runtime/src/layers/api_client.rs
index d44a84f2..7113ee0a 100644
--- a/lambda-runtime/src/layers/api_client.rs
+++ b/lambda-runtime/src/layers/api_client.rs
@@ -44,6 +44,18 @@ where
}
}
+impl<S> Clone for RuntimeApiClientService<S>
+where
+ S: Clone,
+{
+ fn clone(&self) -> Self {
+ Self {
+ inner: self.inner.clone(),
+ client: self.client.clone(),
+ }
+ }
+}
+
#[pin_project(project = RuntimeApiClientFutureProj)]
pub enum RuntimeApiClientFuture<F> {
First(#[pin] F, Arc<Client>),
diff --git a/lambda-runtime/src/layers/api_response.rs b/lambda-runtime/src/layers/api_response.rs
index 453f8b4c..5bb3c96f 100644
--- a/lambda-runtime/src/layers/api_response.rs
+++ b/lambda-runtime/src/layers/api_response.rs
@@ -51,6 +51,27 @@
+impl<S, EventPayload, Response, BufferedResponse, StreamingResponse, StreamItem, StreamError> Clone
+ for RuntimeApiResponseService<
+ S,
+ EventPayload,
+ Response,
+ BufferedResponse,
+ StreamingResponse,
+ StreamItem,
+ StreamError,
+ >
+where
+ S: Clone,
+{
+ fn clone(&self) -> Self {
+ Self {
+ inner: self.inner.clone(),
+ _phantom: PhantomData,
+ }
+ }
+}
+
impl Service
for RuntimeApiResponseService<
S,
diff --git a/lambda-runtime/src/layers/trace.rs b/lambda-runtime/src/layers/trace.rs
index e93927b1..4a3ad3d9 100644
--- a/lambda-runtime/src/layers/trace.rs
+++ b/lambda-runtime/src/layers/trace.rs
@@ -25,6 +25,7 @@ impl<S> Layer<S> for TracingLayer {
}
/// Tower service returned by [TracingLayer].
+#[derive(Clone)]
pub struct TracingService<S> {
inner: S,
}
diff --git a/lambda-runtime/src/lib.rs b/lambda-runtime/src/lib.rs
index cbcd0a9e..610b608f 100644
--- a/lambda-runtime/src/lib.rs
+++ b/lambda-runtime/src/lib.rs
@@ -39,8 +39,12 @@ pub use lambda_runtime_api_client::tracing;
/// Types available to a Lambda function.
mod types;
+#[cfg(all(unix, feature = "graceful-shutdown"))]
+use crate::runtime::SHUTDOWN_NOTIFY;
use requests::EventErrorRequest;
pub use runtime::{LambdaInvocation, Runtime};
+#[cfg(all(unix, feature = "graceful-shutdown"))]
+use std::time::Duration;
pub use types::{Context, FunctionResponse, IntoFunctionResponse, LambdaEvent, MetadataPrelude, StreamResponse};
/// Error type that lambdas may result in
@@ -59,6 +63,9 @@ pub struct Config {
pub log_stream: String,
/// The name of the Amazon CloudWatch Logs group for the function.
pub log_group: String,
+ /// Maximum concurrent invocations for Lambda managed-concurrency environments.
+ /// Populated from `AWS_LAMBDA_MAX_CONCURRENCY` when present.
+ pub max_concurrency: Option<usize>,
}
type RefConfig = Arc<Config>;
@@ -75,8 +82,17 @@ impl Config {
version: env::var("AWS_LAMBDA_FUNCTION_VERSION").expect("Missing AWS_LAMBDA_FUNCTION_VERSION env var"),
log_stream: env::var("AWS_LAMBDA_LOG_STREAM_NAME").unwrap_or_default(),
log_group: env::var("AWS_LAMBDA_LOG_GROUP_NAME").unwrap_or_default(),
+ max_concurrency: env::var("AWS_LAMBDA_MAX_CONCURRENCY")
+ .ok()
+ .and_then(|v| v.parse::<usize>().ok())
+ .filter(|&c| c > 0),
}
}
+
+ /// Returns true if concurrent runtime mode should be enabled.
+ pub fn is_concurrent(&self) -> bool {
+ self.max_concurrency.map(|c| c > 1).unwrap_or(false)
+ }
}
/// Return a new [`ServiceFn`] with a closure that takes an event and context as separate arguments.
@@ -126,6 +142,30 @@ where
runtime.run().await
}
+/// Starts the Lambda Rust runtime in a mode that is compatible with
+/// Lambda Managed Instances (concurrent invocations).
+///
+/// When `AWS_LAMBDA_MAX_CONCURRENCY` is set to a value greater than 1, this
+/// will use a concurrent `/next` polling loop with a bounded number of
+/// in-flight handler tasks. When the environment variable is unset or `<= 1`,
+/// it falls back to the same sequential behavior as [`run`], so the same
+/// handler can run on both classic Lambda and Lambda Managed Instances.
+pub async fn run_concurrent(handler: F) -> Result<(), Error>
+where
+ F: Service, Response = R> + Clone + Send + 'static,
+ F::Future: Future