Add mistral-common library #1641

Open · wants to merge 2 commits into main
22 changes: 20 additions & 2 deletions packages/tasks/src/local-apps.ts
@@ -207,10 +207,28 @@ curl -X POST "http://localhost:8000/v1/completions" \\
"temperature": 0.5
}'`;
const runCommand = model.tags.includes("conversational") ? runCommandInstruct : runCommandNonInstruct;

let setup: string;
let dockerCommand: string;

if (model.tags.includes("mistral-common")) {
setup = [
"# Install vLLM from pip:",
"pip install vllm",
"# Make sure you have the latest version of mistral-common installed:",
"pip install --upgrade mistral-common"
].join("");
dockerCommand = `# Load and run the model:\ndocker exec -it my_vllm_container bash -c "vllm serve ${model.id} --tokenizer_mode mistral --config_format mistral --load_format mistral --tool-call-parser mistral --enable-auto-tool-choice"`;
}
else {
setup = ["# Install vLLM from pip:", "pip install vllm"].join("\n");
dockerCommand = `# Load and run the model:\ndocker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`;
}

return [
{
title: "Install from pip",
setup: ["# Install vLLM from pip:", "pip install vllm"].join("\n"),
setup: setup,
content: [`# Load and run the model:\nvllm serve "${model.id}"`, runCommand],
},
{
@@ -227,7 +245,7 @@ curl -X POST "http://localhost:8000/v1/completions" \\
` --model ${model.id}`,
].join("\n"),
content: [
`# Load and run the model:\ndocker exec -it my_vllm_container bash -c "vllm serve ${model.id}"`,
dockerCommand,
runCommand,
],
},
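For reviewers, a minimal standalone sketch of the branching this diff adds to local-apps.ts: the `mistral-common` tag extends the pip setup text and appends the Mistral-specific flags to the `vllm serve` Docker command. The `LocalModel` interface and `buildVllmCommands` helper below are illustrative stand-ins, not names from the PR.

```ts
// Illustrative stand-ins; the real code lives in local-apps.ts and receives a ModelData object.
interface LocalModel {
	id: string;
	tags: string[];
}

function buildVllmCommands(model: LocalModel): { setup: string; dockerCommand: string } {
	const isMistralCommon = model.tags.includes("mistral-common");
	const serveFlags = isMistralCommon
		? " --tokenizer_mode mistral --config_format mistral --load_format mistral --tool-call-parser mistral --enable-auto-tool-choice"
		: "";
	const setupLines = ["# Install vLLM from pip:", "pip install vllm"];
	if (isMistralCommon) {
		setupLines.push(
			"# Make sure you have the latest version of mistral-common installed:",
			"pip install --upgrade mistral-common"
		);
	}
	return {
		setup: setupLines.join("\n"),
		dockerCommand: `# Load and run the model:\ndocker exec -it my_vllm_container bash -c "vllm serve ${model.id}${serveFlags}"`,
	};
}

// Example with a placeholder model id:
console.log(buildVllmCommands({ id: "mistralai/some-model", tags: ["mistral-common"] }).dockerCommand);
```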
20 changes: 20 additions & 0 deletions packages/tasks/src/model-libraries-snippets.ts
@@ -1893,4 +1893,24 @@ audio = model.autoencoder.decode(codes)[0].cpu()
torchaudio.save("sample.wav", audio, model.autoencoder.sampling_rate)
`,
];

export const mistral_common = (model: ModelData): string[] => [
`# We recommend using vLLM to serve Mistral AI models.
pip install vllm

# Make sure you have the latest version of mistral-common installed.
pip install --upgrade mistral-common[image,audio]

# Serve the model with an OpenAI-compatible API.
vllm serve ${model.id} --tokenizer_mode mistral --config_format mistral --load_format mistral --tool-call-parser mistral --enable-auto-tool-choice

# Query the model with curl in a separate terminal.
curl http://localhost:8000/v1/chat/completions \\
-H "Content-Type: application/json" \\
-d '{
"model": "${model.id}",
"messages": [{"role": "user", "content": "What is the capital of France?"}]
}'`,
];

//#endregion
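The curl example at the end of the new `mistral_common` snippet can also be reproduced from TypeScript. The sketch below is only an illustration of the same request against a locally running vLLM server; the endpoint and response shape follow the OpenAI-compatible API the snippet targets, and the model id is a placeholder rather than anything defined in this PR.

```ts
// Mirrors the snippet's curl command using the built-in fetch API.
async function queryLocalVllm(modelId: string, prompt: string): Promise<string> {
	const res = await fetch("http://localhost:8000/v1/chat/completions", {
		method: "POST",
		headers: { "Content-Type": "application/json" },
		body: JSON.stringify({
			model: modelId,
			messages: [{ role: "user", content: prompt }],
		}),
	});
	if (!res.ok) {
		throw new Error(`vLLM server returned ${res.status}`);
	}
	const data = await res.json();
	// OpenAI-compatible schema: the assistant reply lives in choices[0].message.content.
	return data.choices[0].message.content as string;
}

queryLocalVllm("mistralai/some-model", "What is the capital of France?").then(console.log);
```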
8 changes: 8 additions & 0 deletions packages/tasks/src/model-libraries.ts
@@ -630,6 +630,14 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
filter: false,
countDownloads: `path_extension:"ckpt"`,
},
mistral_common: {
prettyLabel: "mistral-common",
repoName: "mistral-common",
repoUrl: "https://github.com/mistralai/mistral-common",
docsUrl: "https://mistralai.github.io/mistral-common/",
snippets: snippets.mistral_common,
countDownloads: `path:"config.json" OR path:"params.json"`,
},
mitie: {
prettyLabel: "MITIE",
repoName: "MITIE",
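To make the intent of the new registry entry concrete, here is a hedged consumption sketch: a caller looks up `mistral_common` in `MODEL_LIBRARIES_UI_ELEMENTS` and renders the snippet added in model-libraries-snippets.ts. The import paths and the trimmed-down model object are assumptions for illustration, not code from this PR.

```ts
// Consumption sketch, not part of the diff; import paths and the partial model object are assumptions.
import { MODEL_LIBRARIES_UI_ELEMENTS } from "./model-libraries";
import type { ModelData } from "./model-data";

const entry = MODEL_LIBRARIES_UI_ELEMENTS.mistral_common;
console.log(entry.prettyLabel); // "mistral-common"
console.log(entry.docsUrl); // "https://mistralai.github.io/mistral-common/"

// The snippet only reads `model.id`, so a minimal object is enough for this illustration.
const model = { id: "mistralai/some-model" } as ModelData;
const [serveSnippet] = entry.snippets?.(model) ?? [];
console.log(serveSnippet);
```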