diff --git a/docs.json b/docs.json index 2481bcb4..683a41ee 100644 --- a/docs.json +++ b/docs.json @@ -49,14 +49,14 @@ { "group": "Agent via No-Code Builder", "pages": [ - "offline-evals/via-ui/agents-via-no-code-builder/quickstart", - "offline-evals/via-ui/agents-via-no-code-builder/types-of-nodes", - "offline-evals/via-ui/agents-via-no-code-builder/agent-deployment", - "offline-evals/via-ui/agents-via-no-code-builder/error-debugging", - "offline-evals/via-ui/agents-via-no-code-builder/loops", - "offline-evals/via-ui/agents-via-no-code-builder/multi-agent-system", - "offline-evals/via-ui/agents-via-no-code-builder/agent-evals", - "offline-evals/via-ui/agents-via-no-code-builder/variables-in-agents" + "offline-evals/via-ui/agents-via-no-code-builder/experiment-with-prompt-chains", + "offline-evals/via-ui/agents-via-no-code-builder/debug-errors-at-every-node", + "offline-evals/via-ui/agents-via-no-code-builder/use-loops-in-chains", + "offline-evals/via-ui/agents-via-no-code-builder/use-api-nodes-within-chains", + "offline-evals/via-ui/agents-via-no-code-builder/deploy-prompt-chains", + "offline-evals/via-ui/agents-via-no-code-builder/test-prompt-chains", + "offline-evals/via-ui/agents-via-no-code-builder/querying-prompt-chains", + "offline-evals/via-ui/agents-via-no-code-builder/multi-agent-system" ] }, { diff --git a/images/docs/evaluate/how-to/prompt-chains/add-api-node.png b/images/docs/evaluate/how-to/prompt-chains/add-api-node.png index e46a3332..5b244b1c 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/add-api-node.png and b/images/docs/evaluate/how-to/prompt-chains/add-api-node.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/add-prompt-state.png b/images/docs/evaluate/how-to/prompt-chains/add-prompt-state.png new file mode 100644 index 00000000..9097bce8 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/add-prompt-state.png differ diff --git 
a/images/docs/evaluate/how-to/prompt-chains/agents-multilingual-variables.png b/images/docs/evaluate/how-to/prompt-chains/agents-multilingual-variables.png new file mode 100644 index 00000000..e433b0e6 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/agents-multilingual-variables.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/agents-variables-dataset.png b/images/docs/evaluate/how-to/prompt-chains/agents-variables-dataset.png new file mode 100644 index 00000000..32c40500 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/agents-variables-dataset.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/agents-variables-test-run-report.png b/images/docs/evaluate/how-to/prompt-chains/agents-variables-test-run-report.png new file mode 100644 index 00000000..eb8c9407 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/agents-variables-test-run-report.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/api-node-editor.png b/images/docs/evaluate/how-to/prompt-chains/api-node-editor.png index 0aff87a9..a81a884b 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/api-node-editor.png and b/images/docs/evaluate/how-to/prompt-chains/api-node-editor.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/api-node-variables.png b/images/docs/evaluate/how-to/prompt-chains/api-node-variables.png new file mode 100644 index 00000000..02ceafe6 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/api-node-variables.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/api-node.png b/images/docs/evaluate/how-to/prompt-chains/api-node.png index b4a30c5a..367657e4 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/api-node.png and b/images/docs/evaluate/how-to/prompt-chains/api-node.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/code-node.png b/images/docs/evaluate/how-to/prompt-chains/code-node.png 
index c7d8214a..53c5c50c 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/code-node.png and b/images/docs/evaluate/how-to/prompt-chains/code-node.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/conditional-node.png b/images/docs/evaluate/how-to/prompt-chains/conditional-node.png new file mode 100644 index 00000000..7a6dfdfe Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/conditional-node.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/edit-api-node.png b/images/docs/evaluate/how-to/prompt-chains/edit-api-node.png index fdec92a3..ca167620 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/edit-api-node.png and b/images/docs/evaluate/how-to/prompt-chains/edit-api-node.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/new-node-state.png b/images/docs/evaluate/how-to/prompt-chains/new-node-state.png index 765c0a14..9aa9db3d 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/new-node-state.png and b/images/docs/evaluate/how-to/prompt-chains/new-node-state.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/output-field-selector.png b/images/docs/evaluate/how-to/prompt-chains/output-field-selector.png deleted file mode 100644 index a4bbb1d0..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/output-field-selector.png and /dev/null differ diff --git a/images/docs/evaluate/how-to/prompt-chains/product-description-generator-and-translator-chain.png b/images/docs/evaluate/how-to/prompt-chains/product-description-generator-and-translator-chain.png index ca18832a..eda1ea95 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/product-description-generator-and-translator-chain.png and b/images/docs/evaluate/how-to/prompt-chains/product-description-generator-and-translator-chain.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/product-images-dataset.png 
b/images/docs/evaluate/how-to/prompt-chains/product-images-dataset.png deleted file mode 100644 index 84c8b817..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/product-images-dataset.png and /dev/null differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-error-state.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-error-state.png new file mode 100644 index 00000000..b1a4799e Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-error-state.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-infinite-loop-trace-sheet.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-infinite-loop-trace-sheet.png new file mode 100644 index 00000000..207597ba Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-infinite-loop-trace-sheet.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-infinite-loop.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-infinite-loop.png new file mode 100644 index 00000000..bd347cef Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-infinite-loop.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-input.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-input.png index 81aa19d0..507d0aa4 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-input.png and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-input.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-loops-sheet.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-loops-sheet.png new file mode 100644 index 00000000..d0b4a036 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-loops-sheet.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-loops.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-loops.png new 
file mode 100644 index 00000000..1a94737b Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-loops.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-merge-strategies.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-merge-strategies.png new file mode 100644 index 00000000..866ceee8 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-merge-strategies.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-sheet-filtering.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-sheet-filtering.png new file mode 100644 index 00000000..ce4d7ca4 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-sheet-filtering.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-dataset.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-dataset.png new file mode 100644 index 00000000..e4ee7c02 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-dataset.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-report.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-report.png index 5a593964..10f91d33 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-report.png and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-report.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-trigger-sheet.png b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-trigger-sheet.png index c06a70fa..34cdcbfb 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-trigger-sheet.png and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-trigger-sheet.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-chain-trace-sheet.png 
b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-trace-sheet.png new file mode 100644 index 00000000..edd5b011 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/prompt-chain-trace-sheet.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-node.png b/images/docs/evaluate/how-to/prompt-chains/prompt-node.png index 5251d495..3349c36e 100644 Binary files a/images/docs/evaluate/how-to/prompt-chains/prompt-node.png and b/images/docs/evaluate/how-to/prompt-chains/prompt-node.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/prompt-tool-node.png b/images/docs/evaluate/how-to/prompt-chains/prompt-tool-node.png new file mode 100644 index 00000000..c03a8ad8 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/prompt-tool-node.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/test-chain.png b/images/docs/evaluate/how-to/prompt-chains/test-chain.png new file mode 100644 index 00000000..bdae9330 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/test-chain.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/translator.png b/images/docs/evaluate/how-to/prompt-chains/translator.png new file mode 100644 index 00000000..8c4db4a4 Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/translator.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/api-block-node.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/api-block-node.png deleted file mode 100644 index 1ffff789..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/api-block-node.png and /dev/null differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/code-block-node.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/code-block-node.png deleted file mode 100644 index af3fe6b6..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/code-block-node.png and /dev/null differ diff --git 
a/images/docs/evaluate/how-to/prompt-chains/tutorials/create-ticket-in-helpdesk-node.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/create-ticket-in-helpdesk-node.png deleted file mode 100644 index 93c389ac..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/create-ticket-in-helpdesk-node.png and /dev/null differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/customer-support-agent.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/customer-support-agent.png deleted file mode 100644 index 9eb63618..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/customer-support-agent.png and /dev/null differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/email-classifier-node.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/email-classifier-node.png deleted file mode 100644 index 48a5cff0..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/email-classifier-node.png and /dev/null differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system-code.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system-code.png new file mode 100644 index 00000000..af5d81eb Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system-code.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system-routing-agent.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system-routing-agent.png new file mode 100644 index 00000000..97c5fdda Binary files /dev/null and b/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system-routing-agent.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system.png new file mode 100644 index 00000000..5e1127c1 Binary files /dev/null and 
b/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system.png differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/priority-scorer-node.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/priority-scorer-node.png deleted file mode 100644 index a0a9bc5a..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/priority-scorer-node.png and /dev/null differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/product-description-generator-and-translator-chain.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/product-description-generator-and-translator-chain.png deleted file mode 100644 index ca18832a..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/product-description-generator-and-translator-chain.png and /dev/null differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/product-description-generator-bot-prompt-node.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/product-description-generator-bot-prompt-node.png deleted file mode 100644 index f4494813..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/product-description-generator-bot-prompt-node.png and /dev/null differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/send-email-response-api-block.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/send-email-response-api-block.png deleted file mode 100644 index aa7269da..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/send-email-response-api-block.png and /dev/null differ diff --git a/images/docs/evaluate/how-to/prompt-chains/tutorials/support-response-generator-node.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/support-response-generator-node.png deleted file mode 100644 index bf1b7a5f..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/support-response-generator-node.png and /dev/null differ diff --git 
a/images/docs/evaluate/how-to/prompt-chains/tutorials/translator-prompt-node.png b/images/docs/evaluate/how-to/prompt-chains/tutorials/translator-prompt-node.png deleted file mode 100644 index e504d03f..00000000 Binary files a/images/docs/evaluate/how-to/prompt-chains/tutorials/translator-prompt-node.png and /dev/null differ diff --git a/offline-evals/via-ui/agents-via-no-code-builder/agent-evals.mdx b/offline-evals/via-ui/agents-via-no-code-builder/agent-evals.mdx deleted file mode 100644 index 148a7ab8..00000000 --- a/offline-evals/via-ui/agents-via-no-code-builder/agent-evals.mdx +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: No-Code Agent Evals -description: Test Agents using datasets to evaluate performance across examples ---- - - -After testing in the playground, evaluate your Agents across multiple test cases to ensure consistent performance using the test runs. - - - - -Add test cases by creating a [Dataset](/library/datasets/import-or-create-datasets). For this example, we'll use a Dataset of product images to generate descriptions. - -![Dataset with product images for testing](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/prompt-chain-test-run-dataset.png) - - - - - -Create an Agent that processes your test examples. In this case, the agent generates product descriptions, translates them to multiple languages, and formats them to match specific requirements. - -![Agent for product description generation](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/product-description-generator-and-translator-chain.png) - - - - - -Open the test configuration by clicking the Test button on the top right corner. - - - - - -Select your dataset and add [Evaluators](/library/evaluators/pre-built-evaluators) to measure the quality of outputs. 
- -![Test configuration with dataset and evaluator options](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/prompt-chain-test-run-trigger-sheet.png) - - - - - -Monitor the [test run](/offline-evals/concepts#test-runs) to analyze the performance of your agent across all inputs. - -![Test run results showing performance metrics](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/prompt-chain-test-run-report.png) - - - - diff --git a/offline-evals/via-ui/agents-via-no-code-builder/error-debugging.mdx b/offline-evals/via-ui/agents-via-no-code-builder/debug-errors-at-every-node.mdx similarity index 57% rename from offline-evals/via-ui/agents-via-no-code-builder/error-debugging.mdx rename to offline-evals/via-ui/agents-via-no-code-builder/debug-errors-at-every-node.mdx index 9d7742c3..01da943e 100644 --- a/offline-evals/via-ui/agents-via-no-code-builder/error-debugging.mdx +++ b/offline-evals/via-ui/agents-via-no-code-builder/debug-errors-at-every-node.mdx @@ -11,18 +11,24 @@ Find and resolve issues in your Agents with clear error diagnostics. When a step This targeted error handling helps you quickly fix issues before they cascade through your AI workflow. -![Error details shown for a failed step in an Agent](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/prompt-chain-error-state.png) +![Error details shown for a failed step in an Agent](/images/docs/evaluate/how-to/prompt-chains/prompt-chain-error-state.png) In this example, the agent stopped at the Code node due to the input being a non-JSON parseable string. -In addition, once the workflow is executed, you can view the full execution log of the node by clicking on the icon in the top right corner for every node. +Click on the `View run logs` button in the bottom toolbar to see the full execution log of the chain and the data passed in each node. 
-![Traces shown for error details for a node in an Agent](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/prompt-chain-trace-sheet.png) +![Traces shown for error details for a node in an Agent](/images/docs/evaluate/how-to/prompt-chains/prompt-chain-trace-sheet.png) +### Debugging errors in loops In cases where a node executes multiple times due to loops, view the execution log of each iteration and in cases of errors, use it to identify the exact inputs and outputs that led to the error. Let's take an example use case where the code checks if the input is JSON parseable or not. And here, the code block has a bug where it sends back the data to the code block to send it in an infinite loop. -![Infinite loop in a code block](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/prompt-chain-infinite-loop.png) +![Infinite loop in a code block](/images/docs/evaluate/how-to/prompt-chains/prompt-chain-infinite-loop.png) -In this case, click on the icon in the top right corner of the code block to show the execution log of each iteration. -![Traces shown for error details for a node in an Agent](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/prompt-chain-infinite-loop-trace-sheet.png) +Click on the `View run logs` button in the bottom toolbar to see the data passed in each iteration. +![Traces shown for error details for a node in an Agent](/images/docs/evaluate/how-to/prompt-chains/prompt-chain-infinite-loop-trace-sheet.png) + +### Search and filtering +You can search and filter the execution log by using the search bar and the filters to filter specific types of errors. 
+ +![Search and filter the execution log](/images/docs/evaluate/how-to/prompt-chains/prompt-chain-sheet-filtering.png) \ No newline at end of file diff --git a/offline-evals/via-ui/agents-via-no-code-builder/agent-deployment.mdx b/offline-evals/via-ui/agents-via-no-code-builder/deploy-prompt-chains.mdx similarity index 69% rename from offline-evals/via-ui/agents-via-no-code-builder/agent-deployment.mdx rename to offline-evals/via-ui/agents-via-no-code-builder/deploy-prompt-chains.mdx index 0ffb413e..1f5f7671 100644 --- a/offline-evals/via-ui/agents-via-no-code-builder/agent-deployment.mdx +++ b/offline-evals/via-ui/agents-via-no-code-builder/deploy-prompt-chains.mdx @@ -1,19 +1,19 @@ --- title: Agent Deployment -description: Quick iterations on agents should not require code deployments every time. With more and more stakeholders working on prompt engineering, its critical to keep deployments of agents as easy as possible without much overhead. Agent deployments on Maxim allow conditional deployment of agent changes that can be used via the SDK. +description: Quick iterations on Agents should not require code deployments every time. With more and more stakeholders working on prompt engineering, its critical to keep deployments of Agents as easy as possible without much overhead. Agent deployments on Maxim allow conditional deployment of Agent changes that can be used via the SDK. --- -## Why Deploy Agents via No-Code Builder via Maxim +## Why deploy Agents via Maxim -- Prompt experimentation - Create multiple versions of your agents, and use a wide variety of models available on Maxim to test and compare their performance using your custom data. +- Prompt experimentation - Create multiple versions of your Agents, and use a wide variety of models available on Maxim to test and compare their performance using your custom data. - Deploy without code changes - Deploy the final version directly from UI—no code changes required. 
Use Maxim's RBAC support to limit deployment permission to key stakeholders. - Custom variables - Use custom variables to create rules to control which environments or user groups should receive the updates. This helps in setting up A/B tests or testing prompt variations internally before pushing to users. ### Deploying an Agent - - Navigate to Evaluation > Agents via no-code builder and open the agent you want to deploy. + + Navigate to Evaluation > Prompts > Prompt Chains and open the prompt chain you want to deploy. Click the 🚀 icon in the header and choose to deploy the present version. @@ -27,16 +27,9 @@ description: Quick iterations on agents should not require code deployments ever Edit or define new variables by clicking "Edit deployment variables" - Define the name and type of any variable. - - For variables of type `select` provide possible options. e.g. Environment: Beta, Staging, Prod. + Define the name and type of any variable. For variables of type `select` provide possible options. e.g. Environment: Beta, Staging, Prod. ![Add new deployment variable](/images/docs/evaluate/how-to/evaluate-prompts/deployments/add-new-deployment-variable.png) - - - For variables of type `multiselect`, configure when the deployment runs: - - all selected options are present in the deployment rule using the `=` operator, or - - any of the selected options are present in the deployment rule using the `includes` operator. - - ![Multiselect deployment rule](/images/docs/evaluate/how-to/evaluate-prompts/deployments/multiselect-deployment-rule.png) Every time you have a new version to deploy, use the variable based rules to deploy conditionally. @@ -50,7 +43,7 @@ description: Quick iterations on agents should not require code deployments ever ## Fetching Agents via SDK -For building query to get agent with specific deployment variables, you can use `QueryBuilder`. +For building query to get Agent with specific deployment variables, you can use `QueryBuilder`. 
```typescript JS/TS @@ -137,29 +130,4 @@ Adding filters based on Tags ``` -Use multiselect variables - - ```typescript JS/TS - import { Maxim, QueryBuilder } from "@maximai/maxim-js; - - const maxim = new Maxim({ apiKey: "", promptManagement: true}); - - const prompt = await maxim.getPromptChain( - "prompt-chain-id", - new QueryBuilder().and().deploymentVar("Tenant ID", ["Tenant1"]).build(), - ); - ``` - - ```python Python - from maxim import Maxim, Config - from maxim.models import QueryBuilder - - maxim = Maxim(Config(api_key="", prompt_management=True)) - - prompt = maxim.get_prompt_chain("prompt-chain-id", - QueryBuilder() - .and_() - .deployment_var("Tenant ID", ["Tenant1"]) - .build()) - ``` - +Learn more about advanced [prompt chain querying](/evaluate/how-to/evaluate-chains/querying-prompt-chains) techniques. \ No newline at end of file diff --git a/offline-evals/via-ui/agents-via-no-code-builder/experiment-with-prompt-chains.mdx b/offline-evals/via-ui/agents-via-no-code-builder/experiment-with-prompt-chains.mdx new file mode 100644 index 00000000..496736ae --- /dev/null +++ b/offline-evals/via-ui/agents-via-no-code-builder/experiment-with-prompt-chains.mdx @@ -0,0 +1,90 @@ +--- +title: Types of nodes +description: Connect various types of blocks to create sophisticated AI systems using our visual editor +--- + + +## Available nodes + + + + +Run any [Prompt](/evaluate/how-to/evaluate-prompts/experiment-in-prompt-playground) as part of your agent. 
+ +- Select from your existing prompts and versions or create an entirely new prompt directly in the editor +- Support for variables and context +- Input passed as user message +- Additionally, any variables defined in the prompt are available as input to the block +- 3 available outputs: + - Assistant's response + - Context retrieved during the LLM call + - Output of attached tool call (if called by the LLM) +- Click on the to edit the selected prompt inline + +Perfect for natural language tasks like classification, generation or analysis. + +![Example of a Prompt node in the editor](/images/docs/evaluate/how-to/prompt-chains/prompt-node.png) + + + + +Add JavaScript logic to process data between nodes. + +- Write custom code to transform data +- Automatic JSON parsing of input +- Automatic stringification of output +- Seamless connection with other nodes +- All parameters included in the `execute` function are available as input to the block +- The output of the code block must be an object. All keys of the returned object are available as output for the next blocks +- Click on the to edit the selected code block inline + +Ideal for data manipulation, validation or custom processing. + +Example of a Code node with JavaScript editor + + + + +Connect external services into your agent. + +- Support for GET, POST and other methods +- Variable substitution in headers, params and body +- Automatic data format handling +- Easy integration with other nodes +- All variables defined in the API node are available as input to the block +- The payload returned from the call is available as output for the next blocks +- Click on the to edit the selected API node inline + +Essential for retrieving external data or triggering services. + +Example of an API node configuration + + + + +Add conditional routing to your agent. 
+ +- Support for multiple conditions +- 2 available outputs: + - Then: Would route data through this port if condition is met + - Else: Would route data through this port if condition is not met + +Essential when data needs to be routed based on certain conditions. + +Example of a Conditional node configuration + + + + +Attach prompt tools to your prompts. + +- The tool call is available as output for the next blocks +- Major use case is to connect the output back to the prompt to have the LLM call the tool again with the updated context + + +You cannot directly create prompt tools in the editor. Attach the tools to the prompt node in the configuration section. They will get attached automatically to the prompt. + + +Example of a Prompt tool node configuration + + diff --git a/offline-evals/via-ui/agents-via-no-code-builder/meta.json b/offline-evals/via-ui/agents-via-no-code-builder/meta.json index 8c3cd1dd..17113b68 100644 --- a/offline-evals/via-ui/agents-via-no-code-builder/meta.json +++ b/offline-evals/via-ui/agents-via-no-code-builder/meta.json @@ -1,13 +1,13 @@ { - "title": "Evaluate Agents", + "title": "Evaluate Chains", "pages": [ - "quickstart", - "types-of-nodes", - "variables-in-agents", - "multi-agent-system", - "loops", - "error-debugging", - "agent-deployment", - "agent-evals" + "experiment-with-prompt-chains", + "debug-errors-at-every-node", + "use-loops-in-chains", + "use-api-nodes-within-chains", + "deploy-prompt-chains", + "test-prompt-chains", + "querying-prompt-chains", + "multi-agent-system" ] } diff --git a/offline-evals/via-ui/agents-via-no-code-builder/multi-agent-system.mdx b/offline-evals/via-ui/agents-via-no-code-builder/multi-agent-system.mdx index 94efff72..cdf432c1 100644 --- a/offline-evals/via-ui/agents-via-no-code-builder/multi-agent-system.mdx +++ b/offline-evals/via-ui/agents-via-no-code-builder/multi-agent-system.mdx @@ -1,5 +1,5 @@ --- -title: Multi-agent System +title: AI-powered multi-agent support system description: 
Multi-agent systems are a powerful way to build complex applications that can handle a wide variety of tasks. --- @@ -7,13 +7,13 @@ import { MaximPlayer } from "/snippets/maximPlayer.mdx" Create an intelligent workflow that can automatically understand and answer user queries related to their accounts, billings, returns or any FAQ. -![Multi agent system workflow](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/multi-agent-system.png) +![Multi agent system workflow](/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system.png) The below prompt would act as our routing agent, classifying the user query into different categories (Account, Billing, Returns & FAQ). -![Multi agent system routing agent](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/multi-agent-system-routing-agent.png) +![Multi agent system routing agent](/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system-routing-agent.png) @@ -21,13 +21,13 @@ The below prompt would act as our routing agent, classifying the user query into Use the code block as a helper to route the data to the corresponding agent based on the category. -![Multi agent system routing helper block](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/multi-agent-system-code.png) +![Multi agent system routing helper block](/images/docs/evaluate/how-to/prompt-chains/tutorials/multi-agent-system-code.png) -Use the [Prompt](/offline-evals/via-ui/prompts/prompt-playground) feature to create the required individual sub-agents. Make sure to attach the required tools for every sub-agent to process the user query. +Use the [Prompt](/evaluate/how-to/evaluate-prompts/experiment-in-prompt-playground) feature to create the required individual sub-agents. Make sure to attach the required tools for every sub-agent to process the user query. Additionally, connect the outputs of all the agents to the `Final output node` to get the final response. 
@@ -35,7 +35,7 @@ Additionally, connect the outputs of all the agents to the `Final output node` t Here is a video of the agent in action: - + diff --git a/offline-evals/via-ui/agents-via-no-code-builder/querying-prompt-chains.mdx b/offline-evals/via-ui/agents-via-no-code-builder/querying-prompt-chains.mdx new file mode 100644 index 00000000..d7bc0f3e --- /dev/null +++ b/offline-evals/via-ui/agents-via-no-code-builder/querying-prompt-chains.mdx @@ -0,0 +1,453 @@ +--- +title: Query Agents via SDK +description: Learn how to efficiently query and retrieve agents using the Maxim SDK, enabling advanced AI workflow management and customization +--- + +All Python code snippets in this document are for version 3.4.0+. + +## Setting Up the SDK +### Initializing the SDK + + +```typescript JS/TS +import { Maxim } from "@maximai/maxim-js"; + +const maxim = new Maxim({ apiKey: "" }); +``` + +```python Python +from maxim import Maxim, Config + +maxim = Maxim(Config(api_key="")) +``` + + +## Working with Deployment Variables +### For a prompt chain with specific deployment variables + +For building query to get prompt chain with specific deployment variables, you can use `QueryBuilder`. 
+ + +```typescript JS/TS +import { Maxim, QueryBuilder } from "@maximai/maxim-js"; + +const maxim = new Maxim({ apiKey: "" }); + +const promptChain = await maxim.getPromptChain("prompt-chain-id", + new QueryBuilder() + .and() + .deploymentVar("Environment", "prod") + .build()); +``` + +```python Python +from maxim import Maxim, Config +from maxim.models import QueryBuilder + +maxim = Maxim(Config(api_key="")) + +prompt_chain = maxim.get_prompt_chain("prompt-chain-id", + QueryBuilder() + .and_() + .deployment_var("Environment", "prod") + .build()) +``` + + +### Adding Multiple Queries + + +```typescript JS/TS +import { Maxim, QueryBuilder } from "@maximai/maxim-js"; + +const maxim = new Maxim({ apiKey: "" }); + +const promptChain = await maxim.getPromptChain( + "prompt-chain-id", + new QueryBuilder().and().deploymentVar("Environment", "prod").deploymentVar("CustomerId", "123").build(), +); +``` + +```python Python +from maxim import Maxim, Config +from maxim.models import QueryBuilder + +maxim = Maxim(Config(api_key="")) + +prompt_chain = maxim.get_prompt_chain("prompt-chain-id", + QueryBuilder() + .and_() + .deployment_var("Environment", "prod") + .deployment_var("CustomerId", "123") + .build()) +``` + + +## Querying Prompt Chains + +Sometimes you have use cases where you need to fetch multiple deployed prompt chains at once using a single query. For example, you might want to fetch all prompt chains for a specific customer or specific workflow. You can use the `getPromptChains` function for this purpose. + + + You will need to query using at least one `deploymentVar` as a filter. Hence you will need to deploy prompt chain versions before querying + them. + + +### Query deployed prompt chains using folder + +To get all prompt chains from a folder, you can use the `getPromptChains` function with `folderId` as a query parameter. + +#### First capture folder id + +There are multiple ways to capture folder id. You can use Maxim dashboard to get folder id. + +1.
Right click/click on three dots on the folder you want to get id for. +2. Select `Edit Folder` option. +3. You will see folder id in the form. + +![Settings Page](/images/docs/folder-id.png) + + +```typescript JS/TS +import { Maxim } from "@maximai/maxim-js"; + +const maxim = new Maxim({ apiKey: "" }); + +const folder = await maxim.getFolderById("folder-id"); +``` + +```python Python +from maxim import Maxim, Config + +maxim = Maxim(Config(api_key="")) + +folder = maxim.get_folder_by_id("folder-id") +``` + + +### Getting Folders by Tags + + +```typescript JS/TS +import { Maxim, QueryBuilder } from "@maximai/maxim-js"; + +const maxim = new Maxim({ apiKey: "" }); + +const folders = await maxim.getFolders(new QueryBuilder().and().tag("CustomerId", "123").build()); +``` + +```python Python +from maxim import Maxim, Config +from maxim.models import QueryBuilder + +maxim = Maxim(Config(api_key="")) + +folders = maxim.get_folders(QueryBuilder().and_().tag("CustomerId", "123").build()) +``` + + +All the rules of prompt chain matching algorithm apply here. You can use same overriding techniques as explained above. 
+ +### Retrieving Deployed Prompt Chains from a Folder + + +```typescript JS/TS +import { Maxim, QueryBuilder } from "@maximai/maxim-js"; + +const maxim = new Maxim({ apiKey: "" }); + +const folder = await maxim.getFolderById("folder-id"); +const promptChains = await maxim.getPromptChains( + new QueryBuilder() + .and() + .folder(folder.id) + .deploymentVar("Environment", "prod") + .build(), +); +``` + +```python Python +from maxim import Maxim, Config +from maxim.models import QueryBuilder + +maxim = Maxim(Config(api_key="")) + +folder = maxim.get_folder_by_id("folder-id") +prompt_chains = maxim.get_prompt_chains( + QueryBuilder() + .and_() + .folder(folder.id) + .deployment_var("Environment", "prod") + .build()) +``` + + +### Filtering Prompt Chains by Deployment Variables + + +```typescript JS/TS +import { Maxim, QueryBuilder } from "@maximai/maxim-js"; + +const maxim = new Maxim({ apiKey: "" }); + +const folder = await maxim.getFolderById("folder-id"); +const promptChains = await maxim.getPromptChains( + new QueryBuilder() + .and() + .folder(folder.id) + .deploymentVar("Environment", "prod") + .build()); +``` + +```python Python +from maxim import Maxim, Config +from maxim.models import QueryBuilder + +maxim = Maxim(Config(api_key="")) + +folder = maxim.get_folder_by_id("folder-id") +prompt_chains = maxim.get_prompt_chains( + QueryBuilder() + .and_() + .folder(folder.id) + .deployment_var("Environment", "prod") + .build()) +``` + + +You have to pass at least one filter along with `folder()`.
+ +## Data Structures +### Prompt Chain Structure + + +```typescript JS/TS +export type PromptChain = { + promptChainId: string; + version: number; + versionId: string; + nodes: ({ order: number } & PromptNode)[]; +}; +// Prompt node +export type PromptNode = { + prompt: Prompt; +}; +// Prompt +export type Prompt = { + promptId: string; + version: number; + versionId: string; + messages: { role: string; content: string | CompletionRequestContent[] }[]; + modelParameters: { [key: string]: any }; + model: string; + tags: PromptTags; +}; +``` + +```python Python +@dataclass +class PromptChain(): + prompt_chain_id: str + version: int + version_id: str + nodes: List[Dict[int, PromptNode]] + +@dataclass +class PromptNode(): + prompt: Prompt + +@dataclass +class Prompt(): + prompt_id: str + version: int + version_id: str + messages: List[Message] + model_parameters: Dict[str, Union[str, int, bool, Dict, None]] + +@dataclass +class Message(): + role: str + content: str +``` + + +### Folder Structure + + +```typescript JS/TS +export type Folder = { + id: string; + name: string; + parentFolderId?: string; + tags: { [key: string]: string }; +}; +``` + +```python Python +@dataclass +class Folder(): + id: str + name: str + parentFolderId: str + tags: Optional[Dict[str, Union[str, int, bool, None]]] = None +``` + + +## Caching +### Using Custom Cache Implementation + +Maxim SDK uses in-memory caching by default. You can use your own caching implementation by passing a custom cache object to the SDK. This allows you to remove complete dependency on our backend. 
+ +### Cache Interface Definition + + +```typescript JS/TS +export interface MaximCache { + getAllKeys(): Promise<string[]>; + get(key: string): Promise<string | null>; + set(key: string, value: string): Promise<void>; + delete(key: string): Promise<void>; +} +``` + +```python Python +class MaximInMemoryCache(): + def getAllKeys(self) -> List[str]: + pass + + def get(self, key: str) -> Optional[str]: + pass + + def set(self, key: str, value: str) -> None: + pass + + def delete(self, key: str) -> None: + pass +``` + + +### Implementing Custom Cache + + +```typescript JS/TS +import { Maxim } from "@maximai/maxim-js"; + +const maxim = new Maxim({ apiKey: "api-key", cache: new CustomCache() }); +``` + +```python Python +from maxim import Maxim, Config + +maxim = Maxim(Config(api_key=api_key, base_url=base_url, cache=CustomCache())) +``` + + +### Default In-Memory Cache Implementation + + +```typescript JS/TS +import { MaximCache } from "@maximai/maxim-js"; + +export class MaximInMemoryCache implements MaximCache { + private cache: Map<string, string> = new Map(); + + getAllKeys(): Promise<string[]> { + return Promise.resolve(Array.from(this.cache.keys())); + } + + get(key: string): Promise<string | null> { + return Promise.resolve(this.cache.get(key) || null); + } + set(key: string, value: string): Promise<void> { + this.cache.set(key, value); + return Promise.resolve(); + } +} +``` + +```python Python +class MaximInMemoryCache(): + def __init__(self): + self.cache = {} + + def getAllKeys(self) -> List[str]: + return list(self.cache.keys()) + + def get(self, key: str) -> Optional[str]: + return self.cache.get(key) + + def set(self, key: str, value: str) -> None: + self.cache[key] = value + + def delete(self, key: str) -> None: + if key in self.cache: + del self.cache[key] +``` + + +## Matching Algorithm + +Before going into the details of how to use the SDK, let's understand how the matching algorithm works. Maxim SDK uses a best matching entity algorithm. + +1.
Let's assume that you have asked for a prompt chain with deployment var `env` as `prod`, `customerId` as `"123"` and a tag, `tenantId` as `456` for `promptId` - `"abc"`. +2. SDK will first try to find a prompt chain matching all conditions. +3. **If we don't find any matching entity, we enforce only `deploymentVar` conditions (you can override this behaviour, as explained in the next section) and match as many tags as possible.** +4. If we still don't find any prompt chain, we check for a prompt chain version marked as fallback. +5. If we still don't find any prompt chain, we return `null`. + +## Overriding Matching Algorithm +### Enforcing Exact Matches + + +```typescript JS/TS +import { QueryBuilder } from "@maximai/maxim-js"; + +const promptChain = await maxim.getPromptChain( + "prompt-chain-id", + new QueryBuilder().and().deploymentVar("Environment", "prod").exactMatch().build(), +); +``` + +```python Python +from maxim import Maxim, Config +from maxim.models import QueryBuilder + +maxim = Maxim(Config(api_key="")) + +prompt_chain = maxim.get_prompt_chain( + "prompt-chain-id", + QueryBuilder() + .and_() + .deployment_var("Environment", "prod") + .exact_match() + .build() +) +``` + + +### Variable-Level Overrides + + +```typescript JS/TS +import { Maxim, QueryBuilder } from "@maximai/maxim-js"; + +const maxim = new Maxim({ apiKey: "" }); + +const promptChain = await maxim.getPromptChain("prompt-id", new QueryBuilder().and().deploymentVar("Environment", "prod").build()); +``` + +```python Python +from maxim import Maxim, Config +from maxim.models import QueryBuilder + +maxim = Maxim(Config(api_key="")) + +prompt_chain = maxim.get_prompt_chain( + "prompt-id", + QueryBuilder() + .and_() + .deployment_var("Environment", "prod") + .exact_match() + .build() +) +``` + diff --git a/offline-evals/via-ui/agents-via-no-code-builder/quickstart.mdx b/offline-evals/via-ui/agents-via-no-code-builder/quickstart.mdx deleted file mode 100644 index b4ed33cd..00000000 ---
a/offline-evals/via-ui/agents-via-no-code-builder/quickstart.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: No-Code Agent Quickstart -description: Test your agentic workflows using Agents via no-code builder with Datasets and Evaluators in minutes. View results across your test cases to find areas where it works well or needs improvement. ---- - - - - -Create an agent by connecting Prompt, Code, and API nodes based on your data flow. Each node type handles a specific task in your AI workflow. - -![Connect nodes to create a no-code agent](/images/docs/evaluate/quickstart/prompt-chain/prompt-chain-playground-interface.png) - - - - - -Test your agent against a [Dataset](/library/datasets/import-or-create-datasets) and add [Evaluators](/library/evaluators/pre-built-evaluators) to measure the quality of outputs. Configure any additional parameters needed for your test run. - -![Select dataset and evaluators for testing](/images/docs/evaluate/quickstart/prompt-chain/prompt-chain-testrun-trigger-sheet.png) - - - - - -Analyze the test report for quality metrics like accuracy and performance metrics like latency and cost. Use these insights to iterate on your agent. - -![Analyze test run results](/images/docs/evaluate/quickstart/prompt-chain/prompt-chain-test-run-report.png) - - - diff --git a/offline-evals/via-ui/agents-via-no-code-builder/test-prompt-chains.mdx b/offline-evals/via-ui/agents-via-no-code-builder/test-prompt-chains.mdx new file mode 100644 index 00000000..2cc42c47 --- /dev/null +++ b/offline-evals/via-ui/agents-via-no-code-builder/test-prompt-chains.mdx @@ -0,0 +1,52 @@ +--- +title: Agent Evals +description: Test Agents using datasets to evaluate performance across examples +--- + + +After testing in the playground, evaluate your Agents across multiple test cases to ensure consistent performance using the test runs. + + + + +Add test cases by creating a [Dataset](/library/how-to/datasets/use-dataset-templates). 
For this example, we'll use a Dataset of product images to generate descriptions. + +![Dataset with product images for testing](/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-dataset.png) + + + + + +Create an Agent that processes your test examples. In this case, the agent generates product descriptions, translates them to multiple languages, and formats them to match specific requirements. + +![Agent for product description generation](/images/docs/evaluate/how-to/prompt-chains/product-description-generator-and-translator-chain.png) + + + + + +Open the test configuration by clicking `Test` in the top right corner. + + + + + +Select your dataset and add [Evaluators](/library/how-to/evaluators/use-pre-built-evaluators) to measure the quality of outputs. + +![Test configuration with dataset and evaluator options](/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-trigger-sheet.png) + + +You can create and use [Presets](/offline-evals/via-ui/advanced/presets) for your test runs to save time and avoid repeating the same configuration. + + + + + + +Monitor the [test run](/evaluate/concepts#test-runs) to analyze the performance of your Prompt Chain across all inputs.
+ +![Test run results showing performance metrics](/images/docs/evaluate/how-to/prompt-chains/prompt-chain-test-run-report.png) + + + + diff --git a/offline-evals/via-ui/agents-via-no-code-builder/types-of-nodes.mdx b/offline-evals/via-ui/agents-via-no-code-builder/use-api-nodes-within-chains.mdx similarity index 50% rename from offline-evals/via-ui/agents-via-no-code-builder/types-of-nodes.mdx rename to offline-evals/via-ui/agents-via-no-code-builder/use-api-nodes-within-chains.mdx index 351f9e66..c16a1c16 100644 --- a/offline-evals/via-ui/agents-via-no-code-builder/types-of-nodes.mdx +++ b/offline-evals/via-ui/agents-via-no-code-builder/use-api-nodes-within-chains.mdx @@ -1,14 +1,14 @@ --- -title: Types of Nodes -description: Make external API calls at any point in your agent to integrate with third-party services. The API node lets you validate data, log events, fetch information, or perform any HTTP request without leaving your agent. Simply configure the endpoint, method, and payload to connect your AI workflow with external systems. +title: Use API nodes within chains +description: Make external API calls at any point in your Prompt Chain to integrate with third-party services. The API node lets you validate data, log events, fetch information, or perform any HTTP request without leaving your chain. Simply configure the endpoint, method, and payload to connect your AI workflow with external systems. --- -## Configuring API nodes +### Configuring API nodes -1. Drag from the start node or any existing node to create a connection +1. Drag from any existing node to create a connection or drop an API node from the add node selection 2. Select API from the node type menu ![Add API node](/images/docs/evaluate/how-to/prompt-chains/add-api-node.png) @@ -16,7 +16,7 @@ description: Make external API calls at any point in your agent to integrate wit -- Click the `more menu (3 dots)` in the top-right corner of the node. 
+- Click the icon in the top-right corner of the node. ![Edit API node](/images/docs/evaluate/how-to/prompt-chains/edit-api-node.png) @@ -41,11 +41,10 @@ Advanced options ![Edit API node](/images/docs/evaluate/how-to/prompt-chains/api-node-editor.png) - -Click `run` to test your API endpoint. By default, the entire response body is set as the node output. To use a specific field from the response use the `Select output field` dropdown to choose the desired response field. + -![Select output field](/images/docs/evaluate/how-to/prompt-chains/output-field-selector.png) +### Using variables - +Use variables in the request body, query parameters and headers using the `{{ variable_name }}` syntax. +![Variables in API node](/images/docs/evaluate/how-to/prompt-chains/api-node-variables.png) - diff --git a/offline-evals/via-ui/agents-via-no-code-builder/loops.mdx b/offline-evals/via-ui/agents-via-no-code-builder/use-loops-in-chains.mdx similarity index 57% rename from offline-evals/via-ui/agents-via-no-code-builder/loops.mdx rename to offline-evals/via-ui/agents-via-no-code-builder/use-loops-in-chains.mdx index 7f946a42..24794640 100644 --- a/offline-evals/via-ui/agents-via-no-code-builder/loops.mdx +++ b/offline-evals/via-ui/agents-via-no-code-builder/use-loops-in-chains.mdx @@ -1,17 +1,20 @@ --- -title: Loops +title: Loops in No-code Agents description: Rerun a part of the flow multiple times --- Use loops when you need to rerun a node or part of the flow multiple times. -![Example of an Agent with loops](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/prompt-chain-loops.png) +![Example of an Agent with loops](/images/docs/evaluate/how-to/prompt-chains/prompt-chain-loops.png) The above example shows a simple agent that would identify the landmark in the given image and return the current weather of that location. The flow that occurs is as follows: 1. The `Prompt` node gets the initial image as an input and runs the LLM to identify the landmark. 2. 
The `getWeather` tool is called with the location of the landmark as input. 3. The output of the `getWeather` tool is connected back to the `Prompt` node. -4. The `Prompt` node runs again with the updated context and returns an assistant message to move the agent forward. -5. The `Final output` node gets executed and the agent completes its execution. +4. The `Prompt` node runs again with the updated context and returns an assistant message to move the chain forward. +5. The `Final output` node gets executed and the chain completes its execution. + +You can check the full execution log of the chain by clicking on the `View run logs` button in the bottom toolbar. +![View run logs button in the bottom toolbar](/images/docs/evaluate/how-to/prompt-chains/prompt-chain-loops-sheet.png) \ No newline at end of file diff --git a/offline-evals/via-ui/agents-via-no-code-builder/variables-in-agents.mdx b/offline-evals/via-ui/agents-via-no-code-builder/use-variables-in-agents.mdx similarity index 66% rename from offline-evals/via-ui/agents-via-no-code-builder/variables-in-agents.mdx rename to offline-evals/via-ui/agents-via-no-code-builder/use-variables-in-agents.mdx index ce1224eb..9023876b 100644 --- a/offline-evals/via-ui/agents-via-no-code-builder/variables-in-agents.mdx +++ b/offline-evals/via-ui/agents-via-no-code-builder/use-variables-in-agents.mdx @@ -9,19 +9,19 @@ Use variables to pass data to your agent directly from your Dataset. Create custom columns of the type `Variable` in your dataset. All data from these columns would be available as variables in your agent. -![Dataset with custom variables](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/agents-variables-dataset.png) +![Dataset with custom variables](/images/docs/evaluate/how-to/prompt-chains/agents-variables-dataset.png) Use the variables in any of your nodes using the `{{variable_name}}` syntax. 
-![Variables in agent](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/agents-multilingual-variables.png) +![Variables in agent](/images/docs/evaluate/how-to/prompt-chains/agents-multilingual-variables.png) Trigger a test run and view the results. -![Agent in playground](/images/docs/offline-evals/via-ui/agents-via-no-code-builder/agents-variables-test-run-report.png) +![Agent in playground](/images/docs/evaluate/how-to/prompt-chains/agents-variables-test-run-report.png) diff --git a/sdk/python/integrations/langgraph/langgraph-with-decorator.mdx b/sdk/python/integrations/langgraph/langgraph-with-decorator.mdx index 804914bb..0663eaee 100644 --- a/sdk/python/integrations/langgraph/langgraph-with-decorator.mdx +++ b/sdk/python/integrations/langgraph/langgraph-with-decorator.mdx @@ -214,5 +214,5 @@ print(resp) title="LangGraph integration with decorator (GitHub)" icon="github" href="https://github.com/maximhq/maxim-cookbooks/blob/main/python/observability-online-eval/langgraph/tavily-search-with-decorators.ipynb" - > + />