Basic

Setup

import uuid
from typing import Tuple

import openai  # replace with your favorite LLM client library

from burr.core import action, State, ApplicationBuilder, when, persistence

Define Actions

We define two actions:

  1. human_input – accepts a prompt from the outside world and adds it to the state

  2. ai_response – takes the prompt + chat history from state and queries OpenAI

While this is just a pass-through to OpenAI (and is thus a little pointless), it's a start. We’ll be adding more later.

@action(reads=[], writes=["prompt", "chat_history"])
def human_input(state: State, prompt: str) -> Tuple[dict, State]:
    """Pulls human input from the outside world and massages it into a standard chat format.
    Note we're adding it into the chat history (with an `append` operation). This 
    is just for convenience of reference -- we could easily just store the chat history
    and access it.
    """
    
    chat_item = {
        "content": prompt,
        "role": "user"
    }
    # return the prompt as the result
    # put the prompt in state and update the chat_history
    return (
        {"prompt": prompt}, 
        state.update(prompt=prompt).append(chat_history=chat_item)
    )

@action(reads=["chat_history"], writes=["response", "chat_history"])
def ai_response(state: State) -> Tuple[dict, State]:
    """Queries OpenAI with the chat. You could easily use langchain, etc... to handle this,
    but we wanted to keep it simple to demonstrate"""
    client = openai.Client()  # replace with your favorite LLM client library
    content = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=state["chat_history"],
    ).choices[0].message.content
    chat_item = {
        "content": content,
        "role": "assistant"
    }
    # return the response as the result
    # put the response in state and update the chat history
    return (
        {"response": content},     
        state.update(response=content).append(chat_history=chat_item)
    )

Create the app

We create our app by adding our actions (passed as **kwargs; the keyword is the action's name), then adding transitions. In this case, the transitions are simple – we just go in a loop. Why a loop? We want to be able to continue the chat: although control flow will pause (return from the application to the caller) after every ai_response call, we want to keep the state (chat history, etc.) around for the next turn.

app = (
    ApplicationBuilder().with_actions(
        human_input=human_input,
        ai_response=ai_response
    ).with_transitions(
        ("human_input", "ai_response"),
        ("ai_response", "human_input")
    ).with_state(chat_history=[])
    .with_entrypoint("human_input")
    .build()
)
app.visualize(output_file_path="digraph_initial", format="png")

Run the app

To run the app, we call the .run function, passing in a stopping condition. In this case, we want it to halt after ai_response. It returns the action it ran, the result it got, and the state. We use the response variable in state to print out the output, although in a React-like frontend system we might elect to return the entire chat history and render it all for the user.

final_action, result, state = app.run(
    halt_after=["ai_response"], 
    inputs={"prompt" : "Who was Aaron Burr?"}
)
print(state['response'])
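
Because the transitions loop back to human_input, calling .run again continues the same conversation – the state (including chat_history) carries over between calls. A quick sketch; the follow-up prompt here is just an illustration:

follow_up_action, result, state = app.run(
    halt_after=["ai_response"],
    inputs={"prompt": "Where was he born?"}  # hypothetical follow-up prompt
)
print(state["response"])
print(len(state["chat_history"]))  # four messages now: two turns of user + assistant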

Add a decision-making step

Let’s add a step to check if the prompt is “safe”. In this case OpenAI does this automatically, so we’re going to simulate it by marking the prompt as unsafe if it contains the word “unsafe”. We’re going to add one step that checks for safety, and another that drafts a response in the case that it is unsafe.

Then, we’re going to add a “conditional” transition, allowing us to respond differently depending on the value of safe.

@action(reads=["prompt"], writes=["safe"])
def safety_check(state: State) -> Tuple[dict, State]:
    safe = "unsafe" not in state["prompt"]
    return {"safe": safe}, state.update(safe=safe)


@action(reads=[], writes=["response", "chat_history"])
def unsafe_response(state: State) -> Tuple[dict, State]:
    content = "I'm sorry, my overlords have forbidden me to respond."
    new_state = (
        state
        .update(response=content)
        .append(
            chat_history={"content": content, "role": "assistant"})
    )
    return {"response": content}, new_state

safe_app = (
    ApplicationBuilder().with_actions(
        human_input=human_input,
        ai_response=ai_response,
        safety_check=safety_check,
        unsafe_response=unsafe_response
    ).with_transitions(
        ("human_input", "safety_check"),
        ("safety_check", "unsafe_response", when(safe=False)),
        ("safety_check", "ai_response", when(safe=True)),
        (["unsafe_response", "ai_response"], "human_input"),
    ).with_state(chat_history=[])
    .with_entrypoint("human_input")
    .build()
)
safe_app.visualize(output_file_path="digraph_safe", include_conditions=True)

# note: we avoid naming this `action`, which would shadow the imported decorator
action_we_ran, result, state = safe_app.run(
    halt_after=["ai_response", "unsafe_response"], 
    inputs={"prompt": "Who was Aaron Burr, sir (unsafe)?"}
)
print(state["response"])
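
For contrast, a prompt that doesn’t contain the word “unsafe” should route through ai_response instead. A minimal check (the prompt is just an example):

action_we_ran, result, state = safe_app.run(
    halt_after=["ai_response", "unsafe_response"],
    inputs={"prompt": "Who was Aaron Burr, sir?"}  # no "unsafe" -> takes the safe branch
)
print(state["response"])  # this time the model answers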

Tracking

OK, now let’s interact with telemetry! All we have to do is add a with_tracker call. We can also just grab the builder from the prior app and start where we left off. We’ll run quite a few prompts to test this out:

app_with_tracker = (
    ApplicationBuilder().with_actions(
        human_input=human_input,
        ai_response=ai_response,
        safety_check=safety_check,
        unsafe_response=unsafe_response
    ).with_transitions(
        ("human_input", "safety_check"),
        ("safety_check", "unsafe_response", when(safe=False)),
        ("safety_check", "ai_response", when(safe=True)),
        (["unsafe_response", "ai_response"], "human_input"),
    ).with_state(chat_history=[])
    .with_entrypoint("human_input")
    .with_tracker(
        "local", project="demo_getting_started"
    ).build()
)
app_with_tracker.uid

for prompt in [
    "Who was Aaron Burr, sir?",
    "Who was Aaron Burr, sir (unsafe)?",
    "If you had ml/ai libraries called 'Hamilton' and 'Burr', what would they do?",
    "Who was Aaron Burr, sir?",
    "Who was Aaron Burr, sir (unsafe)?",
    "If you had ml/ai libraries called 'Hamilton' and 'Burr', what would they do?",
]:
    action_we_ran, result, state = app_with_tracker.run(
        halt_after=["ai_response", "unsafe_response"], 
        inputs={"prompt": prompt}
    )

Tracking Server

You can run the tracking server by running burr in the terminal. If you want to see it live, you can run the subsequent cell (which does some magic to run it for you). If the embedding in the notebook gets annoying, navigate to the link to the UI that the next cell outputs.

Press the refresh button (🔄) next to “Live” to watch it update live, and run the cell above again to send more queries.

from IPython.display import Markdown
url = f"[Link to UI](http://localhost:7241/project/demo_getting_started/{app_with_tracker.uid})"
Markdown(url)

import os
import time
from IPython.display import HTML
get_ipython().system = os.system  # patch so the shell command below runs in the background
!burr --no-open > /dev/null 2>&1 &
time.sleep(3)  # quick trick to wait for the server to start
url = f"http://localhost:7241/project/demo_getting_started/{app_with_tracker.uid}"
iframe = f'<iframe src="{url}" width="100%" height="1000px"></iframe>'
display(HTML(iframe))

Persistence

While the tracking we showed above does do storage/persistence, Burr has a host of other capabilities to help with state persistence and reloading.

The use case is this – you have quite a few simultaneous conversations, each with its own state and assigned to its own user. You want to be able to store them, pause them when the user logs off, and reload them when the user logs back on. You can do this with persisters. There are two interfaces to them:

  1. A set of pre-built persisters (PostgreSQL, SQLite, Redis, etc.) that you can use

  2. A custom persister class that you write – see the sketch below

To add a persister, you tell the builder to load state from it (.initialize_from(...)) and to save state to it (.with_state_persister(...)).

More about persistence here.
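
To give a flavor of option (2), here is a minimal sketch of a custom persister – an in-memory, dict-backed one. It assumes the BaseStatePersister interface (list_app_ids / load / save) from burr.core.persistence; double-check the exact signatures in the persistence docs before using this for real:

import datetime
from typing import Literal, Optional

class InMemoryPersister(persistence.BaseStatePersister):
    """Toy persister keeping state in a dict -- illustration only, nothing is durable."""

    def __init__(self):
        self._storage = {}  # (partition_key, app_id) -> list of persisted snapshots

    def list_app_ids(self, partition_key: str, **kwargs) -> list:
        return [app_id for (pk, app_id) in self._storage if pk == partition_key]

    def load(self, partition_key: str, app_id: Optional[str],
             sequence_id: Optional[int] = None, **kwargs) -> Optional[dict]:
        snapshots = self._storage.get((partition_key, app_id))
        return snapshots[-1] if snapshots else None  # latest snapshot wins

    def save(self, partition_key: Optional[str], app_id: str, sequence_id: int,
             position: str, state: State, status: Literal["completed", "failed"], **kwargs):
        self._storage.setdefault((partition_key, app_id), []).append({
            "partition_key": partition_key, "app_id": app_id,
            "sequence_id": sequence_id, "position": position,
            "state": state, "status": status,
            "created_at": datetime.datetime.now().isoformat(),
        })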

app_id = f"unique_app_id_{uuid.uuid4()}"  # unique app ID -- we create it here, but this will be generated for you if omitted
partition_key = "new_burr_user"  # this can be anything; in a chatbot it would typically identify the user
# we're going to be creating this multiple times to demonstrate so let's stick it in a function
def create_persistable_app():
    sqllite_persister = persistence.SQLLitePersister(db_path="./sqlite.db", table_name="burr_state")
    sqllite_persister.initialize()
    return (
        ApplicationBuilder().with_actions(
            human_input=human_input,
            ai_response=ai_response,
            safety_check=safety_check,
            unsafe_response=unsafe_response
        ).with_transitions(
            ("human_input", "safety_check"),
            ("safety_check", "unsafe_response", when(safe=False)),
            ("safety_check", "ai_response", when(safe=True)),
            (["unsafe_response", "ai_response"], "human_input"),
        ).initialize_from(
            initializer=sqllite_persister,
            resume_at_next_action=True,
            default_state={"chat_history": []},
            default_entrypoint="human_input"
        ).with_state_persister(sqllite_persister)
        .with_identifiers(app_id=app_id, partition_key=partition_key)
        .with_tracker(
            "local", project="demo_getting_started"
        ).build()
    )

app_initial = create_persistable_app()
action_we_ran, result, state = app_initial.run(
    halt_after=["ai_response", "unsafe_response"], 
    inputs={"prompt": "Who was Aaron Burr, sir?"}
)
for item in state["chat_history"]:
    print(f"{item['role']}: {item['content']}\n")

del app_initial  # simulate the user logging off -- the state lives in the database
app_reloaded = create_persistable_app()  # reloads from the persisted state

action_we_ran, result, state = app_reloaded.run(
    halt_after=["ai_response", "unsafe_response"], 
    inputs={"prompt": "Who was Alexander Hamilton?"}
)
for item in state["chat_history"]:
    print(f"{item['role']}: {item['content']}\n")
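
As a sanity check, you can ask the persister directly what it stored. This assumes the list_app_ids and load methods from the persister interface (same caveat as the sketch above):

inspector = persistence.SQLLitePersister(db_path="./sqlite.db", table_name="burr_state")
print(inspector.list_app_ids(partition_key))  # should include our app_id
snapshot = inspector.load(partition_key, app_id)  # latest persisted snapshot
print(snapshot["position"], len(snapshot["state"]["chat_history"]))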