From fe52c7d9e28e71b7f6c6626f9552d6c75fe85e50 Mon Sep 17 00:00:00 2001
From: Hyesoo Kim <100982596+duper203@users.noreply.github.com>
Date: Fri, 4 Oct 2024 14:22:52 -0700
Subject: [PATCH 1/3] Created using Colab
---
.../81_gradio_stream.ipynb | 417 ++++++++++--------
1 file changed, 224 insertions(+), 193 deletions(-)
diff --git a/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb b/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb
index c195400..e765c3c 100644
--- a/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb
+++ b/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb
@@ -1,202 +1,233 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# 81. Gradio_Stream\n",
- "\n",
- "## Overview \n",
- "This exercise demonstrates how to build a Retrieval-Augmented Generation (RAG) system using Gradio and how to generate and stream responses in real-time using its streaming features. Through this exercise, you will learn to handle real-time interactions with users via a web-based interface. This process helps manage the overall conversation flow, thereby providing more detailed and meaningful responses.\n",
- " \n",
- "## Purpose of the Exercise\n",
- "The purpose of this exercise is to implement real-time response generation and streaming capabilities using Gradio to develop a live interactive chatbot interface. By the end of this tutorial, users will be able to create a dynamic chat system that streams responses as they are generated, enhancing user engagement and interaction.\n",
- "\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 19,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
- "guardrails-ai 0.4.3 requires typer[all]<0.10.0,>=0.9.0, but you have typer 0.12.3 which is incompatible.\u001b[0m\u001b[31m\n",
- "\u001b[0m"
- ]
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "authorship_tag": "ABX9TyP/yVUMm6xvC5ywj3m1XyTx",
+ "include_colab_link": true
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
}
- ],
- "source": [
- "!pip install -qU gradio python-dotenv langchain-upstage python-dotenv"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 21,
- "metadata": {},
- "outputs": [],
- "source": [
- "# @title set API key\n",
- "import os\n",
- "import getpass\n",
- "from pprint import pprint\n",
- "import warnings\n",
- "\n",
- "warnings.filterwarnings(\"ignore\")\n",
- "\n",
- "from IPython import get_ipython\n",
- "\n",
- "if \"google.colab\" in str(get_ipython()):\n",
- " # Running in Google Colab. Please set the UPSTAGE_API_KEY in the Colab Secrets\n",
- " from google.colab import userdata\n",
- " os.environ[\"UPSTAGE_API_KEY\"] = userdata.get(\"UPSTAGE_API_KEY\")\n",
- "else:\n",
- " # Running locally. Please set the UPSTAGE_API_KEY in the .env file\n",
- " from dotenv import load_dotenv\n",
- "\n",
- " load_dotenv()\n",
- "\n",
- "if \"UPSTAGE_API_KEY\" not in os.environ:\n",
- " os.environ[\"UPSTAGE_API_KEY\"] = getpass.getpass(\"Enter your Upstage API key: \")\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 22,
- "metadata": {},
- "outputs": [],
- "source": [
- "import gradio as gr\n",
- "\n",
- "from langchain_upstage import ChatUpstage\n",
- "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
- "from langchain_core.output_parsers import StrOutputParser\n",
- "from langchain.schema import AIMessage, HumanMessage\n",
- "\n",
- "\n",
- "llm = ChatUpstage(streaming=True)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 23,
- "metadata": {},
- "outputs": [],
- "source": [
- "# More general chat\n",
- "chat_with_history_prompt = ChatPromptTemplate.from_messages(\n",
- " [\n",
- " (\"system\", \"You are a helpful assistant.\"),\n",
- " MessagesPlaceholder(variable_name=\"history\"),\n",
- " (\"human\", \"{message}\"),\n",
- " ]\n",
- ")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 24,
- "metadata": {},
- "outputs": [],
- "source": [
- "chain = chat_with_history_prompt | llm | StrOutputParser()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 25,
- "metadata": {},
- "outputs": [],
- "source": [
- "def chat(message, history):\n",
- " history_langchain_format = []\n",
- " for human, ai in history:\n",
- " history_langchain_format.append(HumanMessage(content=human))\n",
- " history_langchain_format.append(AIMessage(content=ai))\n",
- "\n",
- " generator = chain.stream({\"message\": message, \"history\": history_langchain_format})\n",
- "\n",
- " assistant = \"\"\n",
- " for gen in generator:\n",
- " assistant += gen\n",
- " yield assistant"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 26,
- "metadata": {},
- "outputs": [],
- "source": [
- "with gr.Blocks() as demo:\n",
- " chatbot = gr.ChatInterface(\n",
- " chat,\n",
- " examples=[\n",
- " \"How to eat healthy?\",\n",
- " \"Best Places in Korea\",\n",
- " \"How to make a chatbot?\",\n",
- " ],\n",
- " title=\"Solar Chatbot\",\n",
- " description=\"Upstage Solar Chatbot\",\n",
- " )\n",
- " chatbot.chatbot.height = 300"
- ]
},
- {
- "cell_type": "code",
- "execution_count": 27,
- "metadata": {},
- "outputs": [
+ "cells": [
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Running on local URL: http://127.0.0.1:7861\n",
- "\n",
- "To create a public link, set `share=True` in `launch()`.\n"
- ]
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ "
"
+ ]
},
{
- "data": {
- "text/html": [
- "
"
+ "cell_type": "markdown",
+ "source": [
+ "# 81. Gradio_Stream\n",
+ "\n",
+ "## Overview\n",
+        "This exercise demonstrates how to build a chatbot interface using Gradio and how to generate and stream responses in real-time using its streaming features. Through this exercise, you will learn to handle real-time interactions with users via a web-based interface. This process helps manage the overall conversation flow, thereby providing more detailed and meaningful responses.\n",
+ "\n",
+ "\n",
+ "## Purpose of the Exercise\n",
+ "The purpose of this exercise is to implement real-time response generation and streaming capabilities using Gradio to develop a live interactive chatbot interface. By the end of this tutorial, users will be able to create a dynamic chat system that streams responses as they are generated, enhancing user engagement and interaction."
],
- "text/plain": [
- ""
+ "metadata": {
+ "id": "_3OOzdNKX_F9"
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "id": "EeuMajQuX89f"
+ },
+ "outputs": [],
+ "source": [
+        "!pip install -qU gradio python-dotenv langchain-upstage langchain"
]
- },
- "metadata": {},
- "output_type": "display_data"
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title set API key\n",
+ "import os\n",
+ "import getpass\n",
+ "from pprint import pprint\n",
+ "import warnings\n",
+ "\n",
+ "warnings.filterwarnings(\"ignore\")\n",
+ "\n",
+ "from IPython import get_ipython\n",
+ "\n",
+ "if \"google.colab\" in str(get_ipython()):\n",
+ " # Running in Google Colab. Please set the UPSTAGE_API_KEY in the Colab Secrets\n",
+ " from google.colab import userdata\n",
+ " os.environ[\"UPSTAGE_API_KEY\"] = userdata.get(\"UPSTAGE_API_KEY\")\n",
+ "else:\n",
+ " # Running locally. Please set the UPSTAGE_API_KEY in the .env file\n",
+ " from dotenv import load_dotenv\n",
+ "\n",
+ " load_dotenv()\n",
+ "\n",
+ "if \"UPSTAGE_API_KEY\" not in os.environ:\n",
+ " os.environ[\"UPSTAGE_API_KEY\"] = getpass.getpass(\"Enter your Upstage API key: \")"
+ ],
+ "metadata": {
+ "id": "-0QLuchjYNNa"
+ },
+ "execution_count": 2,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import gradio as gr\n",
+ "\n",
+ "from langchain_upstage import ChatUpstage\n",
+ "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
+ "from langchain_core.output_parsers import StrOutputParser\n",
+ "from langchain.schema import AIMessage, HumanMessage\n",
+ "\n",
+ "\n",
+ "llm = ChatUpstage(model=\"solar-pro\", streaming=True)"
+ ],
+ "metadata": {
+ "id": "WozQzLsbYQci"
+ },
+ "execution_count": 6,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# More general chat\n",
+ "chat_with_history_prompt = ChatPromptTemplate.from_messages(\n",
+ " [\n",
+ " (\"system\", \"You are a helpful assistant.\"),\n",
+ " MessagesPlaceholder(variable_name=\"history\"),\n",
+ " (\"human\", \"{message}\"),\n",
+ " ]\n",
+ ")"
+ ],
+ "metadata": {
+ "id": "HvUWFysOYR-1"
+ },
+ "execution_count": 7,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "chain = chat_with_history_prompt | llm | StrOutputParser()"
+ ],
+ "metadata": {
+ "id": "md7JLQRXYTVi"
+ },
+ "execution_count": 8,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "def chat(message, history):\n",
+ " history_langchain_format = []\n",
+ " for human, ai in history:\n",
+ " history_langchain_format.append(HumanMessage(content=human))\n",
+ " history_langchain_format.append(AIMessage(content=ai))\n",
+ "\n",
+ " generator = chain.stream({\"message\": message, \"history\": history_langchain_format})\n",
+ "\n",
+ " assistant = \"\"\n",
+ " for gen in generator:\n",
+ " assistant += gen\n",
+ " yield assistant"
+ ],
+ "metadata": {
+ "id": "OSxSlNElYU-C"
+ },
+ "execution_count": 9,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "with gr.Blocks() as demo:\n",
+ " chatbot = gr.ChatInterface(\n",
+ " chat,\n",
+ " examples=[\n",
+ " \"How to eat healthy?\",\n",
+ " \"Best Places in Korea\",\n",
+ " \"How to make a chatbot?\",\n",
+ " ],\n",
+ " title=\"Solar Chatbot\",\n",
+ " description=\"Upstage Solar Chatbot\",\n",
+ " )\n",
+ " chatbot.chatbot.height = 600"
+ ],
+ "metadata": {
+ "id": "LGQNwOMqYWdi"
+ },
+ "execution_count": 13,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "if __name__ == \"__main__\":\n",
+ " demo.launch()"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 625
+ },
+ "id": "xGHiNMUUYX5r",
+ "outputId": "83453647-3c98-4bc0-d50f-39e5f77273d0"
+ },
+ "execution_count": 14,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Setting queue=True in a Colab notebook requires sharing enabled. Setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
+ "\n",
+ "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
+ "Running on public URL: https://a034c11ee66c3ab813.gradio.live\n",
+ "\n",
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
+ ]
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ ""
+ ],
+ "text/html": [
+ ""
+ ]
+ },
+ "metadata": {}
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Example\n",
+ ""
+ ],
+ "metadata": {
+ "id": "KOCMuJ6yZEKV"
+ }
}
- ],
- "source": [
- "if __name__ == \"__main__\":\n",
- " demo.launch()"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.10"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
+ ]
+}
\ No newline at end of file
From a0d137de222b2fefa482988db8f2457849edf6eb Mon Sep 17 00:00:00 2001
From: Hyesoo Kim <100982596+duper203@users.noreply.github.com>
Date: Fri, 4 Oct 2024 14:23:18 -0700
Subject: [PATCH 2/3] update path
---
Solar-Fullstack-LLM-101/81_gradio_stream.ipynb | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb b/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb
index e765c3c..8f3dc75 100644
--- a/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb
+++ b/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb
@@ -23,7 +23,7 @@
"colab_type": "text"
},
"source": [
- "
"
+ "
"
]
},
{
@@ -230,4 +230,4 @@
}
}
]
-}
\ No newline at end of file
+}
From 5b75a9ace454b73f654a6d4790b9b688e26b4aa0 Mon Sep 17 00:00:00 2001
From: Hyesoo Kim <100982596+duper203@users.noreply.github.com>
Date: Mon, 7 Oct 2024 09:53:49 -0700
Subject: [PATCH 3/3] Update 81_gradio_stream.ipynb: env setting
---
.../81_gradio_stream.ipynb | 46 +++++++++----------
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb b/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb
index 8f3dc75..332e6ce 100644
--- a/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb
+++ b/Solar-Fullstack-LLM-101/81_gradio_stream.ipynb
@@ -56,29 +56,29 @@
{
"cell_type": "code",
"source": [
- "# @title set API key\n",
- "import os\n",
- "import getpass\n",
- "from pprint import pprint\n",
- "import warnings\n",
- "\n",
- "warnings.filterwarnings(\"ignore\")\n",
- "\n",
- "from IPython import get_ipython\n",
- "\n",
- "if \"google.colab\" in str(get_ipython()):\n",
- " # Running in Google Colab. Please set the UPSTAGE_API_KEY in the Colab Secrets\n",
- " from google.colab import userdata\n",
- " os.environ[\"UPSTAGE_API_KEY\"] = userdata.get(\"UPSTAGE_API_KEY\")\n",
- "else:\n",
- " # Running locally. Please set the UPSTAGE_API_KEY in the .env file\n",
- " from dotenv import load_dotenv\n",
- "\n",
- " load_dotenv()\n",
- "\n",
- "if \"UPSTAGE_API_KEY\" not in os.environ:\n",
- " os.environ[\"UPSTAGE_API_KEY\"] = getpass.getpass(\"Enter your Upstage API key: \")"
- ],
+ "# @title set API key\n",
+ "from pprint import pprint\n",
+ "import os\n",
+ "\n",
+ "import warnings\n",
+ "\n",
+ "warnings.filterwarnings(\"ignore\")\n",
+ "\n",
+ "if \"google.colab\" in str(get_ipython()):\n",
+ " # Running in Google Colab. Please set the UPSTAGE_API_KEY in the Colab Secrets\n",
+ " from google.colab import userdata\n",
+ "\n",
+ " os.environ[\"UPSTAGE_API_KEY\"] = userdata.get(\"UPSTAGE_API_KEY\")\n",
+ "else:\n",
+ " # Running locally. Please set the UPSTAGE_API_KEY in the .env file\n",
+ " from dotenv import load_dotenv\n",
+ "\n",
+ " load_dotenv()\n",
+ "\n",
+ "assert (\n",
+ " \"UPSTAGE_API_KEY\" in os.environ\n",
+ "), \"Please set the UPSTAGE_API_KEY environment variable\""
+ ],
"metadata": {
"id": "-0QLuchjYNNa"
},