This repository was archived by the owner on Apr 14, 2026. It is now read-only.
forked from Abraxas-365/langchain-rust
-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathinit_chat_model.rs
More file actions
64 lines (55 loc) · 1.59 KB
/
init_chat_model.rs
File metadata and controls
64 lines (55 loc) · 1.59 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
use langchain_ai_rust::language_models::init_chat_model;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Example 1: simplest form — model name only, every optional parameter left unset.
    println!("Example 1: Simple model initialization");
    let chat = init_chat_model("gpt-4o-mini", None, None, None, None, None, None, None).await?;
    println!("Response: {}\n", chat.invoke("Say hello in one sentence.").await?);

    // Example 2: same model, but with temperature and max_tokens supplied.
    println!("Example 2: Model with temperature and max_tokens");
    let chat =
        init_chat_model("gpt-4o-mini", Some(0.7), Some(100), None, None, None, None, None).await?;
    println!("Response: {}\n", chat.invoke("Count to 5.").await?);

    // Example 3: the "provider:model" naming scheme selects the backend explicitly.
    println!("Example 3: Using provider:model format");
    let chat = init_chat_model(
        "openai:gpt-4o-mini",
        Some(0.5),
        Some(200),
        None,
        None,
        None,
        None,
        None,
    )
    .await?;
    println!("Response: {}\n", chat.invoke("What is Rust?").await?);

    // Example 4: a non-OpenAI model — provider is inferred from the model name.
    println!("Example 4: Claude model");
    let chat = init_chat_model(
        "claude-3-5-sonnet-20240620",
        Some(0.8),
        Some(150),
        None,
        None,
        None,
        None,
        None,
    )
    .await?;
    println!(
        "Response: {}\n",
        chat.invoke("Explain quantum computing briefly.").await?
    );

    Ok(())
}