Skip to content

OpenAI

Async Chat

Demonstrates how to use the ChatInterface to create a chatbot using OpenAI's API with async/await.

Source code for openai_async_chat.py
"""
Demonstrates how to use the `ChatInterface` to create a chatbot using
OpenAI's with async/await.
"""

import panel as pn
from openai import AsyncOpenAI

pn.extension()


async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
    """Stream ChatGPT's reply to the latest message, yielding the growing text."""
    stream = await aclient.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": contents}],
        stream=True,
    )
    accumulated = ""
    async for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is None:
            continue
        accumulated += delta
        yield accumulated


# Client created at import time; reads OPENAI_API_KEY from the environment.
aclient = AsyncOpenAI()
chat_interface = pn.chat.ChatInterface(callback=callback, callback_user="ChatGPT")
# Greeting only; respond=False keeps the callback from being triggered.
chat_interface.send(
    "Send a message to get a reply from ChatGPT!", user="System", respond=False
)
chat_interface.servable()

Authentication

Demonstrates how to use the ChatInterface widget with authentication for OpenAI's API.

Source code for openai_authentication.py
"""
Demonstrates how to use the `ChatInterface` widget with authentication for
OpenAI's API.
"""

import os

import panel as pn
from openai import AsyncOpenAI

SYSTEM_KWARGS = dict(
    user="System",
    respond=False,
)

pn.extension()


def add_key_to_env(key):
    """Validate the entered OpenAI key and export it to the environment.

    Bound to ``key_input`` via ``pn.bind``, so it runs on every change.
    Values without the ``sk-`` prefix are rejected with a system message;
    valid keys are stored in ``OPENAI_API_KEY`` and the chat is enabled.
    """
    if not key.startswith("sk-"):
        chat_interface.send("Please enter a valid OpenAI key!", **SYSTEM_KWARGS)
        return

    # BUG FIX: the key was never actually stored anywhere, so the callback's
    # "OPENAI_API_KEY" in os.environ check could never succeed.
    os.environ["OPENAI_API_KEY"] = key
    chat_interface.send(
        "Your OpenAI key has been set. Feel free to minimize the sidebar.",
        **SYSTEM_KWARGS,
    )
    chat_interface.disabled = False


# Password-style input so the key is masked; add_key_to_env runs on each change.
key_input = pn.widgets.PasswordInput(placeholder="sk-...", name="OpenAI Key")
pn.bind(add_key_to_env, key=key_input, watch=True)


async def callback(
    contents: str,
    user: str,
    instance: pn.chat.ChatInterface,
):
    """Stream a ChatGPT reply, refusing until an API key has been provided."""
    if "OPENAI_API_KEY" not in os.environ:
        yield "Please first set your OpenAI key in the sidebar!"
        return

    # BUG FIX: `api_key` is not a valid keyword of `chat.completions.create`
    # in openai>=1.0 (it raised a TypeError); the key must be configured on
    # the client itself instead.
    client = AsyncOpenAI(api_key=key_input.value)
    response = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": contents}],
        stream=True,
    )
    message = ""
    async for chunk in response:
        part = chunk.choices[0].delta.content
        if part is not None:
            message += part
            yield message


aclient = AsyncOpenAI()
# The chat starts disabled until a valid key is entered in the sidebar.
chat_interface = pn.chat.ChatInterface(callback=callback, disabled=True)
chat_interface.send(
    "First enter your OpenAI key in the sidebar, then send a message!", **SYSTEM_KWARGS
)

# Sidebar hosts the key input; the chat fills the main area.
pn.template.MaterialTemplate(
    title="OpenAI ChatInterface with authentication",
    sidebar=[key_input],
    main=[chat_interface],
).servable()

Chat

Demonstrates how to use the ChatInterface to create a chatbot using OpenAI's API.

Source code for openai_chat.py
"""
Demonstrates how to use the `ChatInterface` to create a chatbot using
OpenAI's API.
"""

import panel as pn
from openai import OpenAI

pn.extension()


async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
    """Stream ChatGPT's reply, yielding the growing message as chunks arrive.

    NOTE(review): the client here is the synchronous ``OpenAI`` client, so
    iterating the stream blocks the event loop between chunks — confirm this
    is acceptable for this example.
    """
    stream = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": contents}],
        stream=True,
    )
    reply = ""
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is None:
            continue
        reply += delta
        yield reply


# Synchronous client; reads OPENAI_API_KEY from the environment.
client = OpenAI()
chat_interface = pn.chat.ChatInterface(callback=callback, callback_user="ChatGPT")
# Greeting only; respond=False keeps the callback from being triggered.
chat_interface.send(
    "Send a message to get a reply from ChatGPT!", user="System", respond=False
)
chat_interface.servable()

Chat With Hvplot

We use OpenAI Function Calling and hvPlot to create an advanced chatbot that can create plots.

Source code for openai_chat_with_hvplot.py
"""
We use [OpenAI *Function Calling*](https://platform.openai.com/docs/guides/function-calling) and
[hvPlot](https://hvplot.holoviz.org/) to create an **advanced chatbot** that can create plots.
"""
import json
from pathlib import Path

import hvplot.pandas  # noqa
import matplotlib.pyplot as plt
import pandas as pd
import panel as pn
import plotly.io as pio
from openai import AsyncOpenAI

ROOT = Path(__file__).parent

ACCENT = "#00A67E"
THEME = pn.config.theme
CSS_TO_BE_UPSTREAMED_TO_PANEL = """
a {color: var(--accent-fill-rest) !important;}
a:hover {color: var(--accent-fill-hover) !important;}
div.pn-wrapper{height: calc(100% - 25px)}
#sidebar {padding-left: 5px;background: var(--neutral-fill-active)}
"""

JSON_THEME = "light"

MODEL = "gpt-3.5-turbo-1106"
CHAT_GPT_LOGO = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/04/ChatGPT_logo.svg/512px-ChatGPT_logo.svg.png"
CHAT_GPT_URL = "https://chat.openai.com/"
HVPLOT_LOGO = "https://holoviz.org/assets/hvplot.png"
PANEL_LOGO = {
    "default": "https://panel.holoviz.org/_static/logo_horizontal_light_theme.png",
    "dark": "https://panel.holoviz.org/_static/logo_horizontal_dark_theme.png",
}
PANEL_URL = "https://panel.holoviz.org/index.html"

pn.chat.message.DEFAULT_AVATARS["assistant"] = HVPLOT_LOGO
pn.chat.ChatMessage.show_reaction_icons = False


@pn.cache
def _read_data():
    """Load ABC.csv and return its 100 most recent rows (cached)."""
    frame = pd.read_csv(ROOT / "ABC.csv", parse_dates=["date"])
    frame = frame.sort_values(by="date", ascending=False)
    return frame.head(100)


# The dataset every plot in this app is built from.
DATA = _read_data()


@pn.cache
def _read_tool(name: str) -> dict:
    """Load the OpenAI tool definition ``tool_<name>.json`` (cached).

    See https://json-schema.org/learn/glossary
    """
    path = ROOT / f"tool_{name}.json"
    with path.open(encoding="utf8") as file:
        return json.load(file)


TOOLS_MAP = {"hvplot": _read_tool("hvplot"), "renderer": _read_tool("renderer")}
TOOLS = list(TOOLS_MAP.values())

# Markdown-formatted list of the hvplot tool's argument names.
HVPLOT_ARGUMENTS = (
    "`"
    + "`, `".join(sorted(TOOLS_MAP["hvplot"]["function"]["parameters"]["properties"]))
    + "`"
)
# Sidebar explanation shown to the user. FIX: removed duplicated word
# "that that" in the first paragraph.
EXPLANATION = f"""
## hvPlot by HoloViz
---

`hvPlot` is a high-level plotting library that works almost in the same way as \
the well known `Pandas` `.plot` method.

The `.hvplot` method supports more data backends, plotting backends and provides more \
features than the `.plot` method.

## OpenAI GPT with Tools
---

We are using the OpenAI `{MODEL}` model with the `hvplot` and `renderer` *tools*.

You can refer to the following `hvplot` arguments

- {HVPLOT_ARGUMENTS}

and `renderer` arguments

- `backend`
"""

# System prompt that frames the assistant's role.
SYSTEM_PROMPT = """\
You are now a **Plotting Assistant** that helps users plot their data using `hvPlot` \
by `HoloViz`.\
"""

# Opening assistant message describing the dataset's type and dtypes.
DATA_PROMPT = f"""\
Hi. Here is a description of your `data`.

The type is `{DATA.__class__.__name__}`. The `dtypes` are

```bash
{DATA.dtypes}
```"""

# Load Panel with the Plotly renderer and the custom CSS defined above.
pn.extension(
    "plotly",
    raw_css=[CSS_TO_BE_UPSTREAMED_TO_PANEL],
)

# Read-only view of the tool definitions, shown in the "Tools" tab.
tools_pane = pn.pane.JSON(
    object=TOOLS, depth=6, theme=JSON_THEME, name="Tools", sizing_mode="stretch_both"
)
# Plot / Tools / Arguments tabs; the callback replaces the contents wholesale.
tabs_layout = pn.Tabs(
    pn.Column(name="Plot"),
    tools_pane,
    pn.Column(name="Arguments"),
    sizing_mode="stretch_both",
    styles={"border-left": "2px solid var(--neutral-fill-active)"},
)


def _powered_by():
    """Return a column with the logos of the frameworks powering the chat UI."""
    image_params = dict(height=50, sizing_mode="fixed", margin=(10, 10))
    logos = pn.Row(
        pn.pane.Image(CHAT_GPT_LOGO, **image_params),
        pn.pane.Image(HVPLOT_LOGO, **image_params),
    )
    return pn.Column(logos, sizing_mode="stretch_width")


def _to_code(kwargs):
    """Returns the .hvplot code corresponding to the kwargs"""
    code = "data.hvplot("
    if kwargs:
        code += "\n"
    for key, value in kwargs.items():
        code += f"    {key}={repr(value)},\n"
    code += ")"
    return code


def _update_tool_kwargs(tool_calls, original_kwargs):
    if tool_calls:
        for tool_call in tool_calls:
            name = tool_call.function.name
            kwargs = json.loads(tool_call.function.arguments)
            if kwargs:
                # the llm does not always specify both the hvplot and renderer args
                # if not is specified its most natural to assume we continue with the
                # same args as before
                original_kwargs[name] = kwargs


def _clean_tool_kwargs(kwargs):
    # Sometimes the llm adds the backend argument to the hvplot arguments
    backend = kwargs["hvplot"].pop("backend", None)
    if backend and "backend" not in kwargs["renderer"]:
        # We add the backend argument to the renderer if none is specified
        kwargs["renderer"]["backend"] = backend
    # Use responsive by default
    if "responsive" not in kwargs:
        kwargs["hvplot"]["responsive"] = True


def _set_theme():
    """Align the Plotly and Matplotlib themes with the Panel theme."""
    dark = THEME == "dark"
    pio.templates.default = "plotly_dark" if dark else "plotly"
    plt.style.use(["default", "dark_background" if dark else "seaborn-v0_8"])


client = AsyncOpenAI()
# Last-used arguments for each tool; persists between chat turns.
tool_kwargs = {"hvplot": {}, "renderer": {}}


async def callback(
    contents: str, user: str, instance
):  # pylint: disable=unused-argument
    """Ask the LLM for tool arguments, then rebuild the plot and tabs."""
    completion = await client.chat.completions.create(
        model=MODEL,
        messages=instance.serialize(),
        tools=TOOLS,
        tool_choice="auto",
    )
    tool_calls = completion.choices[0].message.tool_calls

    # Fold the new tool arguments into the persistent state, tidy them up,
    # and derive the equivalent hvplot code snippet.
    _update_tool_kwargs(tool_calls, tool_kwargs)
    _clean_tool_kwargs(tool_kwargs)
    code = _to_code(tool_kwargs["hvplot"])

    reply = f"""
Try running

```python
{code}
```"""
    chat_interface.send(reply, user="Assistant", respond=False)
    plot = DATA.hvplot(**tool_kwargs["hvplot"])
    _set_theme()
    plot_pane = pn.pane.HoloViews(
        object=plot, sizing_mode="stretch_both", name="Plot", **tool_kwargs["renderer"]
    )
    arguments_pane = pn.pane.JSON(
        tool_kwargs,
        sizing_mode="stretch_both",
        depth=3,
        theme=JSON_THEME,
        name="Arguments",
    )
    tabs_layout[:] = [plot_pane, tools_pane, arguments_pane]


chat_interface = pn.chat.ChatInterface(
    callback=callback,
    show_rerun=False,
    show_undo=False,
    show_clear=False,
    callback_exception="verbose",
)
# Prime the conversation: a system prompt plus a description of the data.
chat_interface.send(
    SYSTEM_PROMPT,
    user="System",
    respond=False,
)
chat_interface.send(
    DATA_PROMPT,
    user="Assistant",
    respond=False,
)


component = pn.Row(chat_interface, tabs_layout, sizing_mode="stretch_both")

# Sidebar shows the logos and explanation; main area is chat + tabs.
pn.template.FastListTemplate(
    title="Chat with hvPlot",
    sidebar=[
        _powered_by(),
        EXPLANATION,
    ],
    main=[component],
    main_layout=None,
    accent=ACCENT,
).servable()

hvPlot

Demonstrates how to use the ChatInterface to create a simple chatbot that can generate plots of your data using hvPlot.

Source code for openai_hvplot.py
"""
Demonstrates how to use the `ChatInterface` to create a **simple chatbot**
that can generate plots of your data using [hvPlot](https://hvplot.holoviz.org/).
"""

import re
from typing import Union

import pandas as pd
import panel as pn
from openai import AsyncOpenAI
from panel.io.mime_render import exec_with_return

DATAFRAME_PROMPT = """
    Here are the columns in your DataFrame: {columns}.
    Create a plot with hvplot that highlights an interesting
    relationship between the columns with hvplot groupby kwarg.
"""

CODE_REGEX = re.compile(r"```\s?python(.*?)```", re.DOTALL)


def _clean(df: pd.DataFrame):
    df.columns = [column.strip() for column in df.columns]
    df = df.head(100)
    return df


async def respond_with_openai(contents: Union[pd.DataFrame, str]):
    """Stream ChatGPT's reply as ChatInterface message dicts.

    A DataFrame upload is remembered in the module-level ``df`` and turned
    into a plotting prompt; a plain string is forwarded as-is.
    """
    if isinstance(contents, pd.DataFrame):
        global df
        df = _clean(contents)
        # _clean has already stripped the column names in place.
        prompt = DATAFRAME_PROMPT.format(columns=contents.columns)
    else:
        prompt = contents

    stream = await aclient.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0,
        max_tokens=500,
        stream=True,
    )
    reply = ""
    async for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is None:
            continue
        reply += delta
        yield {"user": "ChatGPT", "object": reply}


async def respond_with_executor(code: str):
    """Execute the extracted code against the uploaded DataFrame.

    Returns a ChatInterface message dict whose object is a Tabs layout with
    the resulting plot and the executed code.
    """
    code_block = f"```python\n{code}\n```"
    global df
    # BUG FIX: `df` only exists after a CSV has been uploaded; without this
    # guard, sending a code block first raised a NameError.
    if "df" not in globals():
        return {
            "user": "Executor",
            "object": "Please upload a CSV file before running code.",
        }
    context = {"df": df}
    plot = exec_with_return(code=code, global_context=context)
    return {
        "user": "Executor",
        "object": pn.Tabs(
            ("Plot", plot),
            ("Code", code_block),
        ),
    }


async def callback(
    contents: Union[str, pd.DataFrame],
    name: str,
    instance: pn.chat.ChatInterface,
):
    """Route user input to ChatGPT and ChatGPT code replies to the executor."""
    if not isinstance(contents, (str, pd.DataFrame)):
        return

    if name == "User":
        async for part in respond_with_openai(contents):
            yield part
        # Trigger another round so ChatGPT's code reply gets executed.
        instance.respond()
        return

    match = CODE_REGEX.search(contents)
    if match:
        yield await respond_with_executor(match.group(1))


aclient = AsyncOpenAI()
# Accept either a CSV upload or a plain text message.
chat_interface = pn.chat.ChatInterface(
    widgets=[pn.widgets.FileInput(name="Upload"), pn.widgets.TextInput(name="Message")],
    callback=callback,
)
# ruff: noqa: E501
chat_interface.send(
    """Send a message to ChatGPT or upload a small CSV file to get started!

<a href="data:text/csv;base64,ZGF0ZSxjYXRlZ29yeSxxdWFudGl0eSxwcmljZQoyMDIxLTAxLTAxLGVsZWN0cm9uaWNzLDIsNTAwICAKMjAyMS0wMS0wMixjbG90aGluZywxLDUwCjIwMjEtMDEtMDMsaG9tZSBnb29kcyw0LDIwMAoyMDIxLTAxLTA0LGVsZWN0cm9uaWNzLDEsMTAwMAoyMDIxLTAxLTA1LGdyb2NlcmllcywzLDc1CjIwMjEtMDEtMDYsY2xvdGhpbmcsMiwxMDAKMjAyMS0wMS0wNyxob21lIGdvb2RzLDMsMTUwCjIwMjEtMDEtMDgsZWxlY3Ryb25pY3MsNCwyMDAwCjIwMjEtMDEtMDksZ3JvY2VyaWVzLDIsNTAKMjAyMS0wMS0xMCxlbGVjdHJvbmljcywzLDE1MDA=" download="example.csv">example.csv</a>
""",
    user="System",
    respond=False,
)
chat_interface.servable()

Image Generation

Demonstrates how to use the ChatInterface to create images using OpenAI's DALL-E API.

Source code for openai_image_generation.py
"""
Demonstrates how to use the `ChatInterface` to create images using
OpenAI's [DALL-E API](https://platform.openai.com/docs/guides/images/image-generation).
"""

import panel as pn
from openai import OpenAI

pn.extension()


def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
    """Generate one 256x256 DALL-E image from the prompt and return it as a pane."""
    result = client.images.generate(prompt=contents, n=1, size="256x256")
    url = result.data[0].url
    return pn.pane.Image(url, width=256, height=256)


client = OpenAI()
# placeholder_text is shown while the (non-streaming) image request runs.
chat_interface = pn.chat.ChatInterface(
    callback=callback, callback_user="DALL-E", placeholder_text="Generating..."
)
chat_interface.send(
    "Create an image by providing a prompt!", user="System", respond=False
)
chat_interface.servable()

Two Bots

Demonstrates how to use the ChatInterface to create two bots that chat with each other.

Source code for openai_two_bots.py
"""
Demonstrates how to use the `ChatInterface` to create two bots that chat with each
other.
"""

import panel as pn
from openai import AsyncOpenAI

pn.extension()


async def callback(
    contents: str,
    user: str,
    instance: pn.chat.ChatInterface,
):
    """Alternate replies between two bot personas, stopping every 6 messages.

    A message from the user (or Happy Bot) is answered by Nerd Bot and vice
    versa; each bot streams its reply and then triggers the other via
    ``instance.respond()``.
    """
    if user in ["User", "Happy Bot"]:
        callback_user = "Nerd Bot"
        callback_avatar = "🤓"
    elif user == "Nerd Bot":
        callback_user = "Happy Bot"
        callback_avatar = "😃"
    else:
        # BUG FIX: any other sender (e.g. a System message) previously left
        # callback_user/callback_avatar unbound and raised UnboundLocalError.
        return

    prompt = f"Think profoundly about {contents}, then ask a question."
    response = await aclient.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        stream=True,
        max_tokens=250,
        temperature=0.1,
    )
    message = ""
    async for chunk in response:
        part = chunk.choices[0].delta.content
        if part is not None:
            message += part
            yield {"user": callback_user, "avatar": callback_avatar, "object": message}

    if len(instance.objects) % 6 == 0:  # stop at every 6 messages
        instance.send(
            "That's it for now! Thanks for chatting!", user="System", respond=False
        )
        return
    instance.respond()


aclient = AsyncOpenAI()
chat_interface = pn.chat.ChatInterface(callback=callback)
# The user supplies the opening topic; the bots then respond to each other.
chat_interface.send(
    "Enter a topic for the bots to discuss! Beware the token usage!",
    user="System",
    respond=False,
)
chat_interface.servable()

With Memory

Demonstrates how to use the ChatInterface to create a chatbot with memory using OpenAI's API with async/await.

Source code for openai_with_memory.py
"""
Demonstrates how to use the `ChatInterface` to create a chatbot using
OpenAI's with async/await.
"""

import panel as pn
from openai import AsyncOpenAI

pn.extension()


async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
    """Stream a reply that takes the whole chat history into account."""
    # Drop the first entry (the system greeting); the rest is the context.
    history = instance.serialize()[1:]
    stream = await aclient.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=history,
        stream=True,
    )
    reply = ""
    async for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta is None:
            continue
        reply += delta
        yield reply


# Client created at import time; reads OPENAI_API_KEY from the environment.
aclient = AsyncOpenAI()
chat_interface = pn.chat.ChatInterface(callback=callback, callback_user="ChatGPT")
# Greeting only; respond=False keeps the callback from being triggered.
chat_interface.send(
    "Send a message to get a reply from ChatGPT!", user="System", respond=False
)
chat_interface.servable()