Spaces:
Sleeping
Sleeping
File size: 5,287 Bytes
93b7629 34d088d 0b4deb4 34d088d 0b4deb4 f0f4ec9 93b7629 0b4deb4 f0f4ec9 0b4deb4 f0f4ec9 93b7629 f0f4ec9 34d088d 6dc58d3 34d088d f0f4ec9 0b4deb4 34d088d f0f4ec9 29487e8 34d088d f0f4ec9 34d088d f0f4ec9 34d088d 0b4deb4 34d088d f0f4ec9 34d088d 93b7629 34d088d f730427 cfce56e f0f4ec9 34d088d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 |
#from dotenv import load_dotenv, find_dotenv
#_ = load_dotenv(find_dotenv())
import solara
from typing import Any, Callable, Optional, TypeVar, Union, cast, overload, List
from typing_extensions import TypedDict
import time
import ipyvue
import reacton
from solara.alias import rv as v
import os
import openai
from openai import OpenAI
import instructor
from pydantic import BaseModel, Field
from langsmith import traceable
from langsmith.wrappers import wrap_openai
# NEEDED FOR INPUT TEXT AREA INSTEAD OF INPUT TEXT
def use_change(el: reacton.core.Element, on_value: Callable[[Any], Any], enabled=True):
    """Call *on_value* with the widget's v_model on blur or when Enter is pressed."""
    # Keep the newest callback in a ref so the effect body never closes over a
    # stale function between renders.
    callback_ref = solara.use_ref(on_value)
    callback_ref.current = on_value

    def attach_events():
        def handle_change(widget, event, data):
            if enabled:
                callback_ref.current(widget.v_model)

        widget = cast(ipyvue.VueWidget, solara.get_widget(el))
        if enabled:
            widget.on_event("blur", handle_change)
            widget.on_event("keyup.enter", handle_change)

        def detach_events():
            if enabled:
                widget.on_event("blur", handle_change, remove=True)
                widget.on_event("keyup.enter", handle_change, remove=True)

        return detach_events

    solara.use_effect(attach_events, [enabled])
@solara.component
def InputTextarea(
    label: str,
    value: Union[str, solara.Reactive[str]] = "",
    on_value: Optional[Callable[[str], None]] = None,
    disabled: bool = False,
    password: bool = False,
    continuous_update: bool = False,
    error: Union[bool, str] = False,
    message: Optional[str] = None,
):
    """Multi-line text input built on Vuetify's Textarea.

    Mirrors the contract of ``solara.InputText``: *value* may be a plain string
    or a Reactive; *on_value* fires with the new text. With
    ``continuous_update=False`` the value is only committed on blur or Enter
    (via ``use_change``); with ``True`` every keystroke commits.

    :param label: placeholder/label shown in the textarea.
    :param value: initial text or a reactive to bind to.
    :param on_value: callback invoked with the committed text.
    :param disabled: render the field read-only.
    :param password: mask input as a password field.
    :param continuous_update: commit on every keystroke instead of blur/Enter.
    :param error: truthy marks the field as errored; a string is also shown.
    :param message: helper text shown when no string error is given.
    """
    reactive_value = solara.use_reactive(value, on_value)
    # Avoid accidental use of the raw arguments after binding the reactive.
    del value, on_value
    def set_value_cast(value):
        # Coerce whatever the widget hands back into a string before storing.
        reactive_value.value = str(value)
    def on_v_model(value):
        # Only propagate per-keystroke changes when continuous updates are on.
        if continuous_update:
            set_value_cast(value)
    messages = []
    # A string error takes precedence over the informational message.
    if error and isinstance(error, str):
        messages.append(error)
    elif message:
        messages.append(message)
    text_area = v.Textarea(
        v_model=reactive_value.value,
        on_v_model=on_v_model,
        label=label,
        disabled=disabled,
        type="password" if password else None,
        error=bool(error),
        messages=messages,
        solo=True,
        hide_details=True,
        outlined=True,
        rows=1,
        auto_grow=True,
    )
    # When not updating continuously, commit the value on blur/Enter instead.
    use_change(text_area, set_value_cast, enabled=not continuous_update)
    return text_area
# EXTRACTION
# Raises KeyError at import time if OPENAI_API_KEY is unset (fail fast);
# OpenAI() below also reads the same environment variable itself.
openai.api_key = os.environ['OPENAI_API_KEY']
# Wrap the OpenAI client with LangSmith so calls are traced.
client = wrap_openai(OpenAI())
# Patch the client with instructor so responses can be parsed into pydantic
# models (TOOLS mode uses OpenAI function/tool calling under the hood).
client = instructor.from_openai(client, mode=instructor.Mode.TOOLS)
class Person(BaseModel):
    """One person extracted from the input text."""
    # Person's name as it appears (or is inferred) from the text.
    name: str
    # Age in years; the model resolves relative phrasing like "ten years older".
    age: int
class People(BaseModel):
    """Collection of every person extracted from the input text.

    Used as instructor's ``response_model`` for partial streaming, so the
    field must be optional with an empty-list default: early stream chunks
    may not contain any complete person yet.
    """
    # Fix: the original `Field(..., default_factory=list)` combined the
    # required-marker default (`...`) with a default_factory — pydantic v1
    # rejects that pairing outright, and pydantic v2 silently discards the
    # `...`. Passing only the factory states the intent unambiguously.
    people: List[Person] = Field(default_factory=list)
class MessageDict(TypedDict):
    """Shape of a single chat message kept in the ``messages`` reactive."""
    # Either "user" or "assistant" in this app.
    role: str
    # The message text shown in the chat box.
    content: str
def add_chunk_to_ai_message(chunk: str):
    """Replace the most recent message with an assistant message holding *chunk*."""
    # Drop the trailing entry, then append the fresh assistant message.
    # Assigning a brand-new list (never mutating in place) is what triggers
    # the reactive re-render.
    history_without_last = messages.value[:-1]
    messages.value = history_without_last + [{"role": "assistant", "content": chunk}]
# DISPLAYED OUTPUT
@solara.component
def ChatInterface():
    """Show the latest assistant message (if any) inside a chat box."""
    with solara.lab.ChatBox():
        history = messages.value
        # Render only when the conversation is non-empty and the last entry
        # did not come from the user.
        if history and history[-1]["role"] != "user":
            solara.Markdown(history[-1]["content"], style={"font-size": "1.2em", "color": "blue"})
# Shared conversation history, read/written by the components below.
messages: solara.Reactive[List[MessageDict]] = solara.reactive([])
# Last stringified extraction pushed to the UI; used to skip duplicate chunks.
aux = solara.reactive("")
# Text the model extracts people from; bound to the page's textarea.
text_block = solara.reactive("Alice is 18 years old, Bob is ten years older and Charles is thirty years old.")
@solara.component
def Page():
    """Top-level page: title, input textarea, extract button, streamed output."""
    with solara.Head():
        solara.Title("Extractor")
    with solara.Column(style={"width": "70%", "padding": "50px"}):
        solara.Markdown("#Extractor")
        solara.Markdown("Enter some text and the language model will try to extract names and ages of the people in the text. Done with :heart: by [alonsosilva](https://twitter.com/alonsosilva)")
        # NOTE(review): this partial-streaming request is constructed on every
        # render, not only when the button is clicked — consider building it
        # inside response() instead; confirm against instructor's lazy
        # create_partial semantics before changing.
        # NOTE(review): the f-string below interpolates the Reactive object
        # itself (text_block) rather than text_block.value — verify that
        # solara's Reactive stringifies to its value here.
        extraction_stream = client.chat.completions.create_partial(
            model="gpt-3.5-turbo",
            response_model=People,
            messages=[
                {
                    "role": "user",
                    "content": f"Get the information about the people: {text_block}",
                },
            ],
            stream=True,
        )
        # Number of user turns so far; a new user message re-triggers the task.
        user_message_count = len([m for m in messages.value if m["role"] == "user"])
        def send():
            # Append a placeholder user message; its content is unused by
            # response(), it only serves to bump user_message_count.
            messages.value = [*messages.value, {"role": "user", "content": "Hello"}]
        def response(message):
            # NOTE(review): `message` is ignored; the stream captured from the
            # enclosing render is consumed instead.
            for extraction in extraction_stream:
                obj = extraction.model_dump()
                # Only push chunks that differ from the last one shown.
                if f"{obj}" != aux.value:
                    add_chunk_to_ai_message(f"{obj}")
                    aux.value = f"{obj}"
        def result():
            # Run the extraction only when the newest message is from the user.
            if messages.value != []:
                if messages.value[-1]["role"] == "user":
                    response(messages.value[-1]["content"])
        # NOTE(review): rebinding `result` shadows the function defined above;
        # this mirrors solara examples, but a distinct name would read better.
        result = solara.lab.use_task(result, dependencies=[user_message_count])
        InputTextarea("Enter text:", value=text_block, continuous_update=False)
        solara.Button(label="Extract names and ages of the people", on_click=send)
        ChatInterface()
Page()
|