2024-07-24 TiDB
Python: docs.llamaindex.ai
TypeScript: ts.llamaindex.ai
RAG explanation: retrieve the documents most relevant to the user's question and hand them to the LLM as context, so the answer is grounded in your own data.
RAG, step 1: load your data.
from llama_index.core import SimpleDirectoryReader

documents = SimpleDirectoryReader("data").load_data()
RAG, step 2: parse complex files into clean text.
(LlamaParse: it's really good. Really!)
# must have a LLAMA_CLOUD_API_KEY set in your environment
# bring in deps
from llama_parse import LlamaParse
from llama_index.core import SimpleDirectoryReader
# set up parser
parser = LlamaParse(
    result_type="markdown"  # "text" also available
)
# use SimpleDirectoryReader to parse our file
file_extractor = {".pdf": parser}
documents = SimpleDirectoryReader(
    input_files=['data/canada.pdf'],
    file_extractor=file_extractor
).load_data()
print(documents)
RAG, step 3: pick an embedding model.
from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding  # pip install llama-index-embeddings-huggingface

Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
RAG, step 4: build an index.
from llama_index.core import VectorStoreIndex

index = VectorStoreIndex.from_documents(documents)
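By default that index lives in memory. Since we're at TiDB: a sketch of backing it with TiDB vector search instead, assuming the llama-index-vector-stores-tidbvector integration (the connection string is a placeholder; check the integration docs for the exact parameters):

from llama_index.core import StorageContext, VectorStoreIndex
from llama_index.vector_stores.tidbvector import TiDBVectorStore

# hypothetical connection string: point this at your own TiDB cluster
vector_store = TiDBVectorStore(
    connection_string="mysql+pymysql://user:password@host:4000/test",
    table_name="llama_vectors",
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)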
RAG, step 5: retrieve (useful when you want the raw matching chunks without an LLM answer).
retriever = index.as_retriever()
nodes = retriever.retrieve("Who is Paul Graham?")
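retrieve() returns NodeWithScore objects; a quick way to eyeball what came back and how well it matched:

for node in nodes:
    print(node.score, node.node.get_content()[:100])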
RAG, step 6: query (retrieval plus LLM synthesis in one call).
query_engine = index.as_query_engine()
response = query_engine.query("Who is Paul Graham?")
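The response stringifies to the generated answer, and response.source_nodes carries the retrieved chunks it was grounded on:

print(response)
print(response.source_nodes)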
A Slack bot:
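A minimal sketch of what that wiring could look like, assuming slack_bolt in Socket Mode with hypothetical SLACK_BOT_TOKEN / SLACK_APP_TOKEN env vars (not from the original demo), reusing the query engine from step 6:

import os
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler

app = App(token=os.environ["SLACK_BOT_TOKEN"])

@app.event("app_mention")
def answer(event, say):
    # pipe the mention text through the RAG query engine
    say(str(query_engine.query(event["text"])))

SocketModeHandler(app, os.environ["SLACK_APP_TOKEN"]).start()

Agents: give the LLM tools and let it decide how to use them.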
from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI

def multiply(a: float, b: float) -> float:
    """Multiply two numbers and return the product."""
    return a * b

multiply_tool = FunctionTool.from_defaults(fn=multiply)
llm = OpenAI(model="gpt-4o", temperature=0.4)
agent = ReActAgent.from_tools(
    [multiply_tool],
    llm=llm,
    verbose=True
)
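Then ask it something that needs the tool; chat() is the standard entry point:

response = agent.chat("What is 2123 * 215327?")
print(response)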
from llama_index.core.tools import QueryEngineTool

budget_tool = QueryEngineTool.from_defaults(
    query_engine,
    name="canadian_budget_2023",
    description="A RAG engine with some basic facts about the 2023 Canadian federal budget",
)
llm = OpenAI(model="gpt-4o", temperature=0.4)
agent = ReActAgent.from_tools(
    [budget_tool],
    llm=llm,
    verbose=True
)
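Same pattern, but now the tool wraps the whole RAG pipeline, so the agent decides when to consult the budget document. An illustrative question (any budget question works):

response = agent.chat("What was the total amount of the 2023 Canadian federal budget?")
print(response)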
Multi-agent services with llama-agents:
from llama_agents import AgentService, AgentOrchestrator, ControlPlaneServer, ServerLauncher, SimpleMessageQueue
from llama_index.core.agent import FunctionCallingAgentWorker
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI

# create a tool and two agents: one that knows the secret fact, one with no tools
def get_the_secret_fact() -> str:
    """Returns the secret fact."""
    return "The secret fact is: A baby llama is called a 'Cria'."

tool = FunctionTool.from_defaults(fn=get_the_secret_fact)
worker1 = FunctionCallingAgentWorker.from_tools([tool], llm=OpenAI())
worker2 = FunctionCallingAgentWorker.from_tools([], llm=OpenAI())
agent1 = worker1.as_agent()
agent2 = worker2.as_agent()

# a message queue for the services to communicate over
message_queue = SimpleMessageQueue()
queue_client = message_queue.client
# wrap each agent in a network service
agent_server_1 = AgentService(
    agent=agent1,
    message_queue=queue_client,
    description="Useful for getting the secret fact.",
    service_name="secret_fact_agent",
    host="127.0.0.1",
    port=8002,
)
agent_server_2 = AgentService(
    agent=agent2,
    message_queue=queue_client,
    description="Useful for getting random facts.",
    service_name="random_fact_agent",
    host="127.0.0.1",
    port=8003,
)

# the control plane routes incoming tasks to agents via the orchestrator
control_plane = ControlPlaneServer(
    message_queue=queue_client,
    orchestrator=AgentOrchestrator(llm=OpenAI()),
)

# launch everything: agent services, control plane, and message queue
launcher = ServerLauncher(
    [agent_server_1, agent_server_2],
    control_plane,
    message_queue,
    additional_consumers=[],
)
launcher.launch_servers()
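Once launched, you talk to the system over HTTP from another process; a sketch using the client from the llama-agents README (the control plane listens on port 8000 by default):

from llama_agents import LlamaAgentsClient

client = LlamaAgentsClient("http://127.0.0.1:8000")
task_id = client.create_task("What is the secret fact?")
# ...wait for the agents to do their work, then:
result = client.get_task_result(task_id)
print(result)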
Monitor:
llama-agents monitor --control-plane-url http://127.0.0.1:8000
Follow me on Twitter:
@seldo
Please don't add me on LinkedIn.