[🐛 BUG] Update content behaves differently with debug mode on/off #1814

Open
1 of 4 tasks
noobHappylife opened this issue Sep 20, 2024 · 16 comments · May be fixed by #2187
Labels
🖰 GUI Related to GUI hacktoberfest - 300💎💎💎 Issues rewarded by 300 points hacktoberfest hacktoberfest issues 🆘 Help wanted Open to participation from the community 💥Malfunction Addresses an identified problem. 🟧 Priority: High Must be addressed as soon ⚔️ Quest Tracks quest-bot quests ❓ Question Can be unblocked with an answer

Comments

@noobHappylife

noobHappylife commented Sep 20, 2024

What went wrong? 🤔

I'm working on an LLM chatbot example and using update_content to update a partial while streaming the response from the LLM. It works, but only smoothly in debug mode; with debug mode off, the updates become "chunky" (see the attached video).

Env:
  • Taipy installed from source, commit 2f33ab1
  • Ubuntu Server 20.04 (also tested on Windows 10)

P.S. I'm not using the chat control because I couldn't get the streaming response to work with it.
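
In short, the pattern is (a distilled, hypothetical version of the full code below; the full code additionally batches every few chunks):

def update_state(state: State, chunk: str):
    # each streamed chunk is appended to the last (assistant) message,
    # and the partial is re-rendered through update_content
    state.messages[-1]["content"] += chunk
    state.conv.update_content(state, create_conv(state))

With debug mode on, each call shows up as a smooth incremental update; with it off, several chunks appear to land at once.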

Expected Behavior

No response

Steps to Reproduce Issue

Here is the sample code

import os
import base64
from dotenv import load_dotenv
import openai

from taipy.gui import State, Gui, invoke_callback, get_state_id, invoke_long_callback, notify
import taipy.gui.builder as tgb

from PIL import Image

load_dotenv()

def on_init(state):
    state.conv.update_content(state, "")
    state.messages_dict = {}
    state.messages = [
        {
            "role": "assistant",
            "style": "assistant_message", 
            "content": "Hi, how can I help you today?",
        },
    ]
    state.gpt_messages = []
    state.model_host = ""
    state.model_port = ""
    state.model_name = ""
    state.vlm_models = ["model1", "model2", "custom"]
    state.selected_model = "model1"
    new_conv = create_conv(state)
    state.conv.update_content(state, new_conv)
    state.latest_response = 0
    state.client = openai.Client(base_url=f"http://{state.model_host}:{state.model_port}/v1", api_key="null")


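# Runs on the GUI thread via invoke_callback: appends the streamed chunk to the
# last (assistant) message and refreshes the partial only every few chunks
# (the latest_response counter) to limit update traffic.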
def update_state(state: State, resp: str):
    state.messages[-1]["content"] += resp
    if state.latest_response > 4:
        state.conv.update_content(state, create_conv(state))
        state.latest_response = 0
    else:
        state.latest_response += 1



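# Runs in a background thread via invoke_long_callback; each streamed chunk is
# handed back to the GUI state through invoke_callback.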
def stream_message(gui, state_id, client, messages, model_name):
    print(f"Stream Message: {state_id}")
    response = client.chat.completions.create(
        messages=messages,
        model=model_name,
        stream=True,
    )

    for chunk in response:
        resp = chunk.choices[0].delta.content
        if resp is None:
            break

        invoke_callback(
            gui,
            state_id,
            update_state,
            [resp],
        )

def get_status(state: State, status: bool):
    if status:
        print("Done")
        state.latest_response = 0
        state.conv.update_content(state, create_conv(state))
        state.gpt_messages.append({
            "role": "assistant",
            "content": [{"type": "text", "text": state.messages[-1]["content"]}],
        })

        # notify(state, "success", "Heavy set function finished!")
    else:
        print("Something went wrong")
        notify(state, "error", "Something went wrong")


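# Rebuilds the conversation page from state.messages; the result is passed to
# conv.update_content() to re-render the partial.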
def create_conv(state):
    messages_dict = {}
    with tgb.Page() as conversation:
        for i, message in enumerate(state.messages):
#            text = message["content"].replace("<br>", "").replace('"', "'")
            text = message["content"]
            messages_dict[f"message_{i}"] = text
            tgb.text(
                "{messages_dict['" + f"message_{i}" + "'] if messages_dict else ''}",
                class_name=f"message_base {message['style']}",
                mode="md",
                id=f"message_id_{i}",
            )
            tgb.text("", mode="pre")
        # tgb.text(
        #     "{latest_response}",
        #     class_name="message_base assistant_message",
        #     mode="md",
        # )
    state.messages_dict = messages_dict
    return conversation


def encode_image(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")

def create_gpt_conv(state):
    messages = []
    if state.system_prompt != "":
        _m = {
            "role": "system",
            "content": [{"type": "text", "text": f"{state.system_prompt}"}],
        }
        messages.append(_m)
    
    if state.query_image_path != "":
        base64_image = encode_image(state.query_image_path)
        _m = {
            "role": "user",
            "content": [
                {"type": "text", "text": f"{state.query_message}"},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"},
                },
            ],
        }
        for existing_message in state.gpt_messages:
            image_exists = len([x for x in existing_message["content"] if x["type"] == "image_url"]) > 0
            if image_exists:
                existing_message["content"].pop(1)
            messages.append(existing_message)
    else:
        _m = {
            "role": "user",
            "content": [{"type": "text", "text": f"{state.query_message}"}],
        }
        messages.extend(state.gpt_messages)
    messages.append(_m)

    state.gpt_messages = messages
    
    return messages


def send_message(state):
    client = openai.Client(base_url=f"http://{state.model_host}:{state.model_port}/v1", api_key="null")
    messages = create_gpt_conv(state)
    if state.query_image_path == "":
        state.messages.append(
            {
                "role": "user",
                "style": "user_message",
                "content": state.query_message,
            }
        )
    else:
        state.messages.append(
            {
                "role": "user",
                "style": "user_message",
                "content": f"{state.query_message}\n![user_image]({state.query_image_thumbnail_path})",
            }
        )
    # state.conv.update_content(state, create_conv(state))
    state.messages.append(
        {
            "role": "assistant",
            "style": "assistant_message",
            "content": "",
        }
    )
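    # empty assistant message: a placeholder that the streaming callback fills in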
    invoke_long_callback(
        state=state,
        user_function=stream_message,
        user_function_args=[gui, get_state_id(state), client, messages, state.model_name],
        user_status_function=get_status,
        user_status_function_args=[]
    )
    # notify(state, "info", "Sending message...")
    state.query_message = ""
    state.query_image_path = ""
#    state.image_uploaded = False


def upload_image(state):
    try:
        state.image_uploaded = True
        global index
        # Open the original image
        original_image = Image.open(state.query_image_path)
        
        # Save the original image
        original_image.save(f"images/original_example_{index}.png")
        
        state.query_image_path = f"images/original_example_{index}.png"
        state.latest_image_path = f"images/original_example_{index}.png"
        # Resize the image to create a thumbnail
        thumbnail_image = original_image.copy()
        thumbnail_image.thumbnail((300, 300))
        
        # Save the thumbnail
        thumbnail_image.save(f"images/thumbnail_example_{index}.png")
        
        # Update the state to point to the thumbnail
        state.query_image_thumbnail_path = f"images/thumbnail_example_{index}.png"
        
        # Increment the index for the next image
        index = index + 1
    except Exception as e:
        state.image_uploaded = False
        notify(
            state,
            "error",
            f"An error occurred: {str(e)}",
        )

def reset_chat(state):
    state.messages = []
    state.gpt_messages = []
    state.query_message = ""
    state.query_image_path = ""
    state.latest_image_path = ""
    state.image_uploaded = False
    state.query_image_thumbnail_path = ""
    state.latest_response = 0
    state.conv.update_content(state, create_conv(state))
    state.selected_model = "model1"
    on_init(state)

def vlm_model_config(name):
    config = {
        "model1": {
            "model_host": "placeholder",
            "model_port": "placeholder",
            "model_name": "placeholder",
        },
        "model2": {
            "model_host": "placeholder",
            "model_port": "placeholder",
            "model_name": "placeholder",
        },
        "custom": {
            "model_host": "",
            "model_port": "",
            "model_name": "custom",
        }
    }
    return config.get(name)

def enlarge_image(state):
    with tgb.Page() as bigimage:
        tgb.image(
            "{state.latest_image_path}",
            width="800px"
        )
    state.bigimage.update_content(state, bigimage)
    state.show_bigimage_dialog = True

def close_image(state, id: str, payload : dict):
    state.show_bigimage_dialog = False

def update_model_info(state):
    config = vlm_model_config(state.selected_model)
    state.model_host = config["model_host"]
    state.model_port = config["model_port"]
    state.model_name = config["model_name"]

if __name__ == "__main__":
    index = 0
    query_image_path = ""
    latest_image_path = ""
    query_image_thumbnail_path = ""
    query_message = ""
    messages = []
    gpt_messages = []
    messages_dict = {}
    model_host = ""
    model_port = ""
    model_name = ""
    system_prompt = ""
    latest_response = 0
    show_bigimage_dialog = False
    image_uploaded = False
    vlm_models = ["model1", "model2", "custom"]
    selected_model = "model1"
    client = openai.Client(api_key="")

    with tgb.Page() as page:
        with tgb.layout(columns="300px 1"):
            with tgb.part(class_name="sidebar"):
                tgb.text("## VLM ChatBot", mode="md")
                tgb.button(
                    "New Conversation",
                    class_name="fullwidth plain",
                    id="reset_app_button",
                    on_action=reset_chat,
                )
                tgb.html("br")
                with tgb.part(render="{image_uploaded}"):
                    tgb.image(
                        content="{latest_image_path}", width="240px", class_name="image_preview", on_action=enlarge_image
                    )

            with tgb.part(class_name="p1"):
                with tgb.expandable("Model: {selected_model}",class_name="card-spacing-half-padding h4", expanded=False):
                    with tgb.layout(columns="1 1 1 1"):
                        tgb.selector(
                            value="{selected_model}",
                            lov="{vlm_models}",
                            label="Select a model",
                            on_change=update_model_info,
                            dropdown=True,
                        )
                        tgb.input(
                            "{model_host}",
                            label="Host IP",
                            change_delay=-1,
                        )
                        tgb.input(
                            "{model_port}",
                            label="Host Port",
                            change_delay=-1,
                        )
                        tgb.input(
                            "{model_name}",
                            label="Model Name",
                            change_delay=-1,
                        )
                    tgb.input(
                        "{system_prompt}",
                        label="System Prompt",
                        change_delay=-1,
                        multiline=True,
                        class_name="fullwidth",
                    )
                with tgb.part(height="600px", class_name="card card_chat"):
                    tgb.part(partial="{conv}")
                    
                with tgb.part("card mt1"):
                    tgb.input(
                        "{query_message}",
                        on_action=send_message,
                        change_delay=-1,
                        label="Write your message:",
                        class_name="fullwidth",
                        multiline=True,
                        lines_shown=3
                    )
                    tgb.file_selector(
                        content="{query_image_path}",
                        on_action=upload_image,
                        extensions=".jpg,.jpeg,.png",
                        label="Upload an image",
                    )
                    # tgb.text("Max file size: 1MB")
            tgb.dialog(
                open="{show_bigimage_dialog}",
#                title="Stop on-going vLLM serving",
#                labels=["Stop"],
                on_action=close_image,
                partial="{bigimage}",
            )

    gui = Gui(page)
    conv = gui.add_partial("")
    bigimage = gui.add_partial("")
    gui.run(
        title="🤖VLM ChatBot", 
        dark_mode=True, 
        margin="0px", 
        host="0.0.0.0", 
        port=34545,
    )

Solution Proposed

No response

Screenshots

  • Refers to the attached video.

Runtime Environment

No response

Browsers

No response

OS

No response

Version of Taipy

No response

Additional Context

No response

Acceptance Criteria

  • Ensure new code is unit tested, and check code coverage is at least 90%.
  • Create related issue in taipy-doc for documentation and Release Notes.

Code of Conduct

  • I have checked the existing issues.
  • I am willing to work on this issue (optional)
@noobHappylife noobHappylife added the 💥Malfunction Addresses an identified problem. label Sep 20, 2024
@noobHappylife noobHappylife changed the title [🐛 BUG] Update content behaves different with debug mode on/off [🐛 BUG] Update content behaves differently with debug mode on/off Sep 20, 2024
@FlorianJacta FlorianJacta added 🖰 GUI Related to GUI ❓ Question Can be unblocked with an answer 🟨 Priority: Medium Not blocking but should be addressed labels Sep 27, 2024
@FlorianJacta
Member

@AlexandreSajus Could you check this issue?

@AlexandreSajus
Contributor

AlexandreSajus commented Sep 27, 2024

@AlexandreSajus Could you check this issue?

We already discussed this on Discord. I think this is expected behavior. Debug mode has to consume performance somewhere (R&D should know more), and this slows down any real-time application. I'm not sure this is an issue.

EDIT: My bad, I had always thought you said the code works when debug was off. I don't really know how I could help here. Maybe R&D has an idea of what could be causing this.
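
For what it's worth, if the difference comes down to how often the partial is refreshed, one workaround (a sketch, not a confirmed fix) is to throttle the refresh by elapsed time instead of by chunk count, so the cadence is the same whichever mode is active. This reuses the update_state/create_conv helpers from the issue; the module-level _last_flush timestamp is a simplification and would need to be kept per state in a multi-user app:

import time

FLUSH_INTERVAL = 0.15  # seconds between partial refreshes; a tuning assumption
_last_flush = 0.0      # simplification: shared across all sessions

def update_state(state, resp: str):
    global _last_flush
    state.messages[-1]["content"] += resp
    now = time.monotonic()
    # refresh the partial at most every FLUSH_INTERVAL seconds, independent
    # of how fast chunks arrive or whether debug mode is on
    if now - _last_flush >= FLUSH_INTERVAL:
        state.conv.update_content(state, create_conv(state))
        _last_flush = now

A final update_content in the status callback (as get_status already does) still flushes any trailing chunks.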

@jrobinAV jrobinAV added 🟧 Priority: High Must be addressed as soon 🆘 Help wanted Open to participation from the community hacktoberfest hacktoberfest issues hacktoberfest - 300💎💎💎 Issues rewarded by 300 points and removed 🟨 Priority: Medium Not blocking but should be addressed labels Sep 27, 2024
@KunjShah95

Kindly allow me to help solve this bug.

@FlorianJacta
Member

@KunjShah95 You are already assigned to another issue. For Hacktoberfest, we only assign issues one at a time. Please submit a PR on the other issue first, or remove your assignment.

Thank you.

@KunjShah95

I want to work on this issue, as I have removed my previous assignment.

@rehanganapathy

Hey there, this seems interesting and I would love to work on it! Can this issue be assigned to me?

@quest-bot quest-bot bot added the ⚔️ Quest Tracks quest-bot quests label Oct 7, 2024

quest-bot bot commented Oct 7, 2024

New Quest!

A new Quest has been launched in @Avaiga’s repo.
Merge a PR that solves this issue to loot the Quest and earn your reward.


Some loot has been stashed in this issue to reward the solver!

🗡 Comment @quest-bot embark to check-in for this Quest and start solving the issue. Other solvers will be notified!

⚔️ When you submit a PR, comment @quest-bot loot #1814 to link your PR to this Quest.

Questions? Check out the docs.

@Rishi-0007
Contributor

@quest-bot embark


quest-bot bot commented Oct 11, 2024

@Rishi-0007 has embarked on their Quest. 🗡

  • @Rishi-0007 has been on GitHub since 2021.
  • They have merged 2 public PRs in that time.
  • Their swords are blessed with HTML and TypeScript magic ✨
  • They have contributed to this repo before.

This is not an assignment to the issue. Please check the repo’s contribution guidelines before submitting a PR.

Questions? Check out the docs.

@Rishi-0007
Contributor

Hi @jrobinAV , Please assign this to me.

@snehaamujri

Hey @jrobinAV @AlexandreSajus, could you please assign this to me? I am interested in working on this issue.

@rohitdash08

Can you please assign this to me?

rohitdash08 added a commit to rohitdash08/taipy that referenced this issue Oct 30, 2024
@rohitdash08 rohitdash08 linked a pull request Oct 30, 2024 that will close this issue
@FredLL-Avaiga
Member

@noobHappylife I think we should help you use the chat control with streaming and all your problems will disappear :-)
Partial does not seem to be a good fit for what you're trying to do.
I'm not sure slowing down the refresh of partials so that it can show streaming data in a more convincing manner will help the Taipy community :-)

FredLL-Avaiga added a commit to rohitdash08/taipy that referenced this issue Oct 30, 2024
@noobHappylife
Author

@noobHappylife I think we should help you use the chat control with streaming and all your problems will disappear :-) Partial does not seem to be a good fit for what you're trying to do. I'm not sure slowing down the refresh of partials so that it can show streaming data in a more convincing manner will help the Taipy community :-)

Sure, that's understandable; perhaps the chat control is the way forward. Looking forward to having a working example with chat streaming. Thank you.

@FredLL-Avaiga
Member

FredLL-Avaiga commented Nov 5, 2024

Here is a chat streaming example:

import datetime
import re
import time
import typing as t

import requests  # type: ignore[import-untyped]

import taipy.gui.builder as tgb
from taipy.gui import Gui, Icon, State, get_state_id, invoke_callback, invoke_long_callback

# The Wikipedia API used to generate content for a date
wiki_url = "https://en.wikipedia.org/api/rest_v1/feed/onthisday/{type}/{month}/{day}"
event_types = {
    "happen": "events",
    "passé": "events",
    "born": "births",
    "né": "births",
    "dead": "deaths",
    "mort": "deaths",
}
user_agent = "https://taipy.io/demo"

# the list of messages
messages: list[tuple[str, str, str]] = []  # (Message id, message, sender)

# the users
users = [
    ["wikipedia", Icon("https://www.wikipedia.org/static/apple-touch/wikipedia.png", "Wikipedia")],
    ["taipy", Icon("https://docs.taipy.io/en/latest/assets/images/favicon.png", "Taipy")],
]


def on_init(state: State):
    # Do not share the message list with other users
    state.messages = []


def add_image_to_message(state: State, idx: int, text: str, image_url: str):
    msg_content: str = state.messages[idx][1]
    pos = msg_content.find(text)
    if pos > -1:
        msg_content = msg_content[: pos + len(text)] + f"\n\n![{text}]({image_url})" + msg_content[pos + len(text) :]
        set_message(state, msg_content, idx)


def update_message_with_image(gui: Gui, state_id: str, message_idx: int, text: str, image: dict):
    if src := image.get("source"):
        time.sleep(0.2)
        invoke_callback(
            gui,
            state_id,
            add_image_to_message,
            [message_idx, text, src],
        )


def update_message(state: State, json, event_type: str, for_date: str, idx: int):
    if isinstance(json, dict):
        # response header
        set_message(state, f"{event_type} for {for_date}: \n", idx)

        for event in json.get(event_type, []):
            time.sleep(0.2)
            # update response
            append_to_message(state, f"\n* {event.get('year', '')}: {event.get('text', '')}", idx)
            invoke_long_callback(
                state=state,
                user_function=update_message_with_image,
                user_function_args=[
                    gui,
                    get_state_id(state),
                    idx,
                    event.get("text", ""),
                    event.get("pages", [{}])[0].get("thumbnail", {}),
                ],
            )


def set_message(state: State, message: str, idx: t.Optional[int] = None):
    if idx is not None and idx < len(state.messages):
        msg = state.messages[idx]
        state.messages[idx] = (msg[0], message, msg[2])
    else:
        idx = len(state.messages)
        state.messages.append((f"{len(state.messages)}", message, users[0][0]))
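    # the list was mutated in place, so tell Taipy to re-sync the binding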
    state.refresh("messages")
    return idx


def append_to_message(state: State, message: str, idx: int):
    if idx < len(state.messages):
        msg = state.messages[idx]
        state.messages[idx] = (msg[0], f"{msg[1]}{message}", msg[2])
        state.refresh("messages")
    return idx


def request_wikipedia(gui: Gui, state_id: str, event_type: str, month: str, day: str):
    idx = invoke_callback(
        gui,
        state_id,
        set_message,
        ["requesting Wikipedia ..."],
    )
    request = wiki_url.format(type=event_type, month=month, day=day)
    req = requests.get(request, headers={"accept": "application/json; charset=utf-8;", "User-Agent": user_agent})
    if req.status_code == 200:
        # display response
        invoke_callback(
            gui,
            state_id,
            update_message,
            [req.json(), event_type, f"{month}/{day}", idx],
        )
    else:
        invoke_callback(
            gui,
            state_id,
            set_message,
            [f"requesting Wikipedia failed: {req.status_code}", idx],
        )


def send_message(state: State, id: str, payload: dict):
    args = payload.get("args", [])

    # display request
    state.messages.append((f"{len(state.messages)}", args[2], args[3]))
    state.refresh("messages")

    # analyse request
    request = args[2].lower()
    type_event = None
    for word in event_types:
        if word in request:
            type_event = event_types[word]
            break
    type_event = type_event if type_event else "events"

    month = None
    day = None
    for m in re.finditer(r"(\d\d?)", request):
        if month is None:
            month = m.group()
        elif day is None:
            day = m.group()
            break
    if month is None:
        month = f"{datetime.datetime.now().month}"
    if day is None:
        day = f"{datetime.datetime.now().day}"

    # process request
    invoke_long_callback(
        state=state,
        user_function=request_wikipedia,
        user_function_args=[gui, get_state_id(state), type_event, month, day],
    )


if __name__ == "__main__":
    with tgb.Page() as page:
        tgb.chat(
            "{messages}",
            users=users,
            on_action=send_message,
            height="80vh",
        )

    gui = Gui(page)
    gui.run(title="🤖Wikipedia ChatBot")

@FredLL-Avaiga
Member

FredLL-Avaiga commented Nov 5, 2024

You can ask "what happened in 11 05?" or "who was born today?"
No intelligence here :-)
