Dataset schema: prompt (string, 70 to 19.8k chars), completion (string, 8 to 1.03k chars), api (string, 23 to 93 chars). Each row below is a flattened notebook prompt that ends at an API call site, followed by the ground-truth completion and the fully qualified API name.
import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas pyarrow tqdm') get_ipython().run_line_magic('pip', 'install -q llama-index-readers-web') get_ipython().run_line_magic('pip', 'install -q llama-index-callbacks-openinference') import hashlib import json from pathlib import Path import os import textwrap from typing import List, Union import llama_index.core from llama_index.readers.web import SimpleWebPageReader from llama_index.core import VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.core.callbacks import CallbackManager from llama_index.callbacks.openinference import OpenInferenceCallbackHandler from llama_index.callbacks.openinference.base import ( as_dataframe, QueryData, NodeData, ) from llama_index.core.node_parser import SimpleNodeParser import pandas as pd from tqdm import tqdm documents = SimpleWebPageReader().load_data( [ "https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt" ] ) print(documents[0].text) parser = SentenceSplitter() nodes = parser.get_nodes_from_documents(documents) print(nodes[0].text) callback_handler =
OpenInferenceCallbackHandler()
llama_index.callbacks.openinference.OpenInferenceCallbackHandler
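
A minimal sketch of how the completed handler is typically wired in, reusing the imports from the prompt above; the query string is illustrative, and the buffer-flush method name follows the OpenInference callback package:

callback_handler = OpenInferenceCallbackHandler()
from llama_index.core import Settings
Settings.callback_manager = CallbackManager([callback_handler])
index = VectorStoreIndex(nodes)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")  # illustrative query
# Flush the logged query data into a pandas DataFrame for inspection
query_df = as_dataframe(callback_handler.flush_query_data_buffer())
print(query_df.head())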
get_ipython().system('pip install llama-index-multi-modal-llms-ollama') get_ipython().system('pip install llama-index-readers-file') get_ipython().system('pip install unstructured') get_ipython().system('pip install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index-vector-stores-qdrant') get_ipython().system('pip install llama-index-embeddings-clip') from llama_index.multi_modal_llms.ollama import OllamaMultiModal mm_model = OllamaMultiModal(model="llava:13b") from pathlib import Path from llama_index.core import SimpleDirectoryReader from PIL import Image import matplotlib.pyplot as plt input_image_path = Path("restaurant_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png') image_documents = SimpleDirectoryReader("./restaurant_images").load_data() imageUrl = "./restaurant_images/fried_chicken.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from pydantic import BaseModel class Restaurant(BaseModel): """Data model for a restaurant.""" restaurant: str food: str discount: str price: str rating: str review: str from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ {query_str} Return the answer as a Pydantic object. The Pydantic schema is given below: """ mm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Restaurant), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=mm_model, verbose=True, ) response = mm_program(query_str="Can you summarize what is in the image?") for res in response: print(res) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg') from pathlib import Path from llama_index.readers.file import UnstructuredReader from llama_index.core.schema import ImageDocument loader = UnstructuredReader() documents = loader.load_data(file=Path("tesla_2021_10k.htm")) image_doc = ImageDocument(image_path="./shanghai.jpg") from llama_index.core import VectorStoreIndex from llama_index.core.embeddings import resolve_embed_model embed_model =
resolve_embed_model("local:BAAI/bge-m3")
llama_index.core.embeddings.resolve_embed_model
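
A short sketch of what the resolved embedding model gives you, assuming the local BAAI/bge-m3 weights download successfully; the sample string is arbitrary:

embed_model = resolve_embed_model("local:BAAI/bge-m3")
vector = embed_model.get_text_embedding("Gigafactory Shanghai")  # arbitrary sample text
print(len(vector))  # dimensionality of the BGE-M3 embedding
index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)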
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate') get_ipython().run_line_magic('pip', 'install unstructured replicate') get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm') get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git') get_ipython().run_line_magic('pip', 'install torch torchvision') get_ipython().run_line_magic('pip', 'install matplotlib scikit-image') get_ipython().run_line_magic('pip', 'install -U qdrant_client') import os REPLICATE_API_TOKEN = "..." # Your Replicate API token here os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1UU0xc3uLXs-WG0aDQSXjGacUkp142rLS" -O texas.jpg') from llama_index.readers.file import FlatReader from pathlib import Path from llama_index.core.node_parser import UnstructuredElementNodeParser reader = FlatReader() docs_2021 = reader.load_data(Path("tesla_2021_10k.htm")) node_parser = UnstructuredElementNodeParser() import openai OPENAI_API_TOKEN = "..." openai.api_key = OPENAI_API_TOKEN # add your OpenAI API key here os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN import os import pickle if not os.path.exists("2021_nodes.pkl"): raw_nodes_2021 = node_parser.get_nodes_from_documents(docs_2021) pickle.dump(raw_nodes_2021, open("2021_nodes.pkl", "wb")) else: raw_nodes_2021 = pickle.load(open("2021_nodes.pkl", "rb")) nodes_2021, objects_2021 = node_parser.get_nodes_and_objects(raw_nodes_2021) from llama_index.core import VectorStoreIndex vector_index = VectorStoreIndex(nodes=nodes_2021, objects=objects_2021) query_engine = vector_index.as_query_engine(similarity_top_k=5, verbose=True) from PIL import Image import matplotlib.pyplot as plt imageUrl = "./texas.jpg" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from llama_index.multi_modal_llms.replicate import ReplicateMultiModal from llama_index.core.schema import ImageDocument from llama_index.multi_modal_llms.replicate.base import ( REPLICATE_MULTI_MODAL_LLM_MODELS, ) print(imageUrl) llava_multi_modal_llm = ReplicateMultiModal( model=REPLICATE_MULTI_MODAL_LLM_MODELS["llava-13b"], max_new_tokens=200, temperature=0.1, ) prompt = "Which Tesla factory is shown in the image? Please answer just the name of the factory."
llava_response = llava_multi_modal_llm.complete( prompt=prompt, image_documents=[ImageDocument(image_path=imageUrl)], ) print(llava_response.text) rag_response = query_engine.query(llava_response.text) print(rag_response) input_image_path = Path("instagram_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=12ZpBBFkYu-jzz1iz356U5kMikn4uN9ww" -O ./instagram_images/jordan.png') from pydantic import BaseModel class InsAds(BaseModel): """Data model for an Instagram ad.""" account: str brand: str product: str category: str discount: str price: str comments: str review: str description: str from PIL import Image import matplotlib.pyplot as plt ins_imageUrl = "./instagram_images/jordan.png" image = Image.open(ins_imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from llama_index.multi_modal_llms.replicate import ReplicateMultiModal from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser from llama_index.multi_modal_llms.replicate.base import ( REPLICATE_MULTI_MODAL_LLM_MODELS, ) prompt_template_str = """\ Can you summarize what is in the image\ and return the answer in JSON format? \ """ def pydantic_llava( model_name, output_class, image_documents, prompt_template_str ): mm_llm = ReplicateMultiModal( model=REPLICATE_MULTI_MODAL_LLM_MODELS["llava-13b"], max_new_tokens=1000, ) llm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=
PydanticOutputParser(output_class)
llama_index.core.output_parsers.PydanticOutputParser
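
A sketch of what the completed parser does in isolation, assuming the InsAds model above; format() appends the model's JSON schema to the prompt, and the program then returns a populated InsAds instance:

parser = PydanticOutputParser(InsAds)
print(parser.format("Describe the ad."))  # prompt text with the InsAds JSON schema appended
# Inside pydantic_llava, the program is invoked and its output parsed into InsAds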
get_ipython().run_line_magic('pip', 'install llama-index-evaluation-tonic-validate') import json import pandas as pd from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.evaluation.tonic_validate import ( AnswerConsistencyEvaluator, AnswerSimilarityEvaluator, AugmentationAccuracyEvaluator, AugmentationPrecisionEvaluator, RetrievalPrecisionEvaluator, TonicValidateEvaluator, ) question = "What makes Sam Altman a good founder?" reference_answer = "He is smart and has a great force of will." llm_answer = "He is a good founder because he is smart." retrieved_context_list = [ "Sam Altman is a good founder. He is very smart.", "What makes Sam Altman such a good founder is his great force of will.", ] answer_similarity_evaluator = AnswerSimilarityEvaluator() score = await answer_similarity_evaluator.aevaluate( question, llm_answer, retrieved_context_list, reference_response=reference_answer, ) score answer_consistency_evaluator = AnswerConsistencyEvaluator() score = await answer_consistency_evaluator.aevaluate( question, llm_answer, retrieved_context_list ) score augmentation_accuracy_evaluator = AugmentationAccuracyEvaluator() score = await augmentation_accuracy_evaluator.aevaluate( question, llm_answer, retrieved_context_list ) score augmentation_precision_evaluator = AugmentationPrecisionEvaluator() score = await augmentation_precision_evaluator.aevaluate( question, llm_answer, retrieved_context_list ) score retrieval_precision_evaluator = RetrievalPrecisionEvaluator() score = await retrieval_precision_evaluator.aevaluate( question, llm_answer, retrieved_context_list ) score tonic_validate_evaluator = TonicValidateEvaluator() scores = await tonic_validate_evaluator.aevaluate( question, llm_answer, retrieved_context_list, reference_response=reference_answer, ) scores.score_dict tonic_validate_evaluator = TonicValidateEvaluator() scores = await tonic_validate_evaluator.aevaluate_run( [question], [llm_answer], [retrieved_context_list], [reference_answer] ) scores.run_data[0].scores get_ipython().system('llamaindex-cli download-llamadataset EvaluatingLlmSurveyPaperDataset --download-dir ./data') from llama_index.core import SimpleDirectoryReader from llama_index.core.llama_dataset import LabelledRagDataset from llama_index.core import VectorStoreIndex rag_dataset = LabelledRagDataset.from_json("./data/rag_dataset.json") documents = SimpleDirectoryReader(input_dir="./data/source_files").load_data( num_workers=4 ) # parallel loading index =
VectorStoreIndex.from_documents(documents=documents)
llama_index.core.VectorStoreIndex.from_documents
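
A plausible continuation over the downloaded survey-paper dataset, assuming default OpenAI settings; the question is illustrative:

index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
response = query_engine.query("What evaluation methods does the survey cover?")  # illustrative
print(response)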
import os from PIL import Image from IPython.display import display from llama_index.tools.openai.image_generation import OpenAIImageGenerationToolSpec from llama_index.agent import ReActAgent from llama_index.tools import FunctionTool def show_image(filename: str) -> Image: """Display an image based on the filename""" img = Image.open(filename) return display(img) image_generation_tool = OpenAIImageGenerationToolSpec( api_key=os.environ["OPENAI_API_KEY"] ) show_image_tool =
FunctionTool.from_defaults(fn=show_image)
llama_index.tools.FunctionTool.from_defaults
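
One way the two tools are commonly handed to an agent; to_tool_list() comes from the tool-spec base class, and the chat request is illustrative:

show_image_tool = FunctionTool.from_defaults(fn=show_image)
agent = ReActAgent.from_tools(
    [*image_generation_tool.to_tool_list(), show_image_tool],
    verbose=True,
)
agent.chat("Generate an image of a sunset over a city and then show it")  # illustrative request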
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.embeddings.huggingface import HuggingFaceEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0.1) Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5") get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
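
A sketch of the usual next steps, reusing the Settings configured above:

documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(f"Loaded {len(documents)} document(s)")
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents)  # uses Settings.llm and Settings.embed_model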
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PyMuPDFReader from llama_index.core import Document from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode docs0 = PyMuPDFReader().load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm =
OpenAI(model="gpt-3.5-turbo")
llama_index.llms.openai.OpenAI
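
A minimal continuation that builds a baseline index straight from the 1024-token nodes; the query is illustrative:

Settings.llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes)
query_engine = base_index.as_query_engine(similarity_top_k=2)
print(query_engine.query("What safety techniques were used to train Llama 2?"))  # illustrative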
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-elasticsearch') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import openai openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.elasticsearch import ElasticsearchStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "A bunch of scientists bring back dinosaurs and mayhem breaks" " loose" ), metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}, ), TextNode( text=( "Leo DiCaprio gets lost in a dream within a dream within a dream" " within a ..." ), metadata={ "year": 2010, "director": "Christopher Nolan", "rating": 8.2, }, ), TextNode( text=( "A psychologist / detective gets lost in a series of dreams within" " dreams within dreams and Inception reused the idea" ), metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6}, ), TextNode( text=( "A bunch of normal-sized women are supremely wholesome and some" " men pine after them" ), metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3}, ), TextNode( text="Toys come alive and have a blast doing so", metadata={"year": 1995, "genre": "animated"}, ), ] vector_store = ElasticsearchStore( index_name="auto_retriever_movies", es_url="http://localhost:9200" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) index =
VectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.VectorStoreIndex
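
A short sketch of querying the Elasticsearch-backed index, assuming a local cluster is reachable at the URL above; the query string is illustrative:

index = VectorStoreIndex(nodes, storage_context=storage_context)
retriever = index.as_retriever(similarity_top_k=2)
for result in retriever.retrieve("dinosaurs causing mayhem"):  # illustrative query
    print(result.node.metadata, result.score)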
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-.." openai.api_key = os.environ["OPENAI_API_KEY"] from IPython.display import Markdown, display from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, ) engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) from llama_index.core import SQLDatabase from llama_index.llms.openai import OpenAI llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo") sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, { "city_name": "Chicago", "population": 2679000, "country": "United States", }, {"city_name": "Seoul", "population": 9776000, "country": "South Korea"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) stmt = select( city_stats_table.c.city_name, city_stats_table.c.population, city_stats_table.c.country, ).select_from(city_stats_table) with engine.connect() as connection: results = connection.execute(stmt).fetchall() print(results) from sqlalchemy import text with engine.connect() as con: rows = con.execute(text("SELECT city_name from city_stats")) for row in rows: print(row) from llama_index.core.query_engine import NLSQLTableQueryEngine query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], llm=llm ) query_str = "Which city has the highest population?" response = query_engine.query(query_str) display(Markdown(f"<b>{response}</b>")) from llama_index.core.indices.struct_store.sql_query import ( SQLTableRetrieverQueryEngine, ) from llama_index.core.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index.core import VectorStoreIndex table_node_mapping = SQLTableNodeMapping(sql_database) table_schema_objs = [ (SQLTableSchema(table_name="city_stats")) ] # add a SQLTableSchema for each table obj_index = ObjectIndex.from_objects( table_schema_objs, table_node_mapping, VectorStoreIndex, ) query_engine = SQLTableRetrieverQueryEngine( sql_database, obj_index.as_retriever(similarity_top_k=1) ) response = query_engine.query("Which city has the highest population?") display(Markdown(f"<b>{response}</b>")) response.metadata["result"] city_stats_text = ( "This table gives information regarding the population and country of a" " given city.\nThe user will query with codewords, where 'foo' corresponds" " to population and 'bar' corresponds to city." ) table_node_mapping =
SQLTableNodeMapping(sql_database)
llama_index.core.objects.SQLTableNodeMapping
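
A plausible continuation that attaches the codeword context to the table schema, assuming SQLTableSchema accepts a context_str as in the table-retriever examples; the codeword query is illustrative:

table_node_mapping = SQLTableNodeMapping(sql_database)
table_schema_objs = [
    SQLTableSchema(table_name="city_stats", context_str=city_stats_text)
]
obj_index = ObjectIndex.from_objects(
    table_schema_objs, table_node_mapping, VectorStoreIndex
)
query_engine = SQLTableRetrieverQueryEngine(
    sql_database, obj_index.as_retriever(similarity_top_k=1)
)
print(query_engine.query("Which bar has the highest foo?"))  # codeword query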
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-experimental-param-tuner') get_ipython().system('pip install llama-index llama-hub') get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') import nest_asyncio nest_asyncio.apply() from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.readers.file import UnstructuredReader from llama_index.readers.file import PyMuPDFReader loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SimpleNodeParser from llama_index.core.schema import IndexNode get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json') from llama_index.core.evaluation import QueryResponseDataset eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) eval_qs = eval_dataset.questions ref_response_strs = [r for (_, r) in eval_dataset.qr_pairs] from llama_index.core import ( VectorStoreIndex, load_index_from_storage, StorageContext, ) from llama_index.experimental.param_tuner import ParamTuner from llama_index.core.param_tuner.base import TunedResult, RunResult from llama_index.core.evaluation.eval_utils import ( get_responses, aget_responses, ) from llama_index.core.evaluation import ( SemanticSimilarityEvaluator, BatchEvalRunner, ) from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding import os import numpy as np from pathlib import Path def _build_index(chunk_size, docs): index_out_path = f"./storage_{chunk_size}" if not os.path.exists(index_out_path): Path(index_out_path).mkdir(parents=True, exist_ok=True) node_parser = SimpleNodeParser.from_defaults(chunk_size=chunk_size) base_nodes = node_parser.get_nodes_from_documents(docs) index =
VectorStoreIndex(base_nodes)
llama_index.core.VectorStoreIndex
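
One plausible completion of the truncated _build_index helper, persisting new indexes and reloading cached ones via the storage utilities imported above:

def _build_index(chunk_size, docs):
    index_out_path = f"./storage_{chunk_size}"
    if not os.path.exists(index_out_path):
        Path(index_out_path).mkdir(parents=True, exist_ok=True)
        node_parser = SimpleNodeParser.from_defaults(chunk_size=chunk_size)
        base_nodes = node_parser.get_nodes_from_documents(docs)
        index = VectorStoreIndex(base_nodes)
        index.storage_context.persist(index_out_path)  # cache for later runs
    else:
        storage_context = StorageContext.from_defaults(persist_dir=index_out_path)
        index = load_index_from_storage(storage_context)
    return index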
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.core.evaluation import CorrectnessEvaluator from llama_index.llms.openai import OpenAI llm = OpenAI("gpt-4") evaluator =
CorrectnessEvaluator(llm=llm)
llama_index.core.evaluation.CorrectnessEvaluator
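
A small sketch of evaluating one query/response/reference triple; the strings are made up for illustration:

evaluator = CorrectnessEvaluator(llm=llm)
result = evaluator.evaluate(
    query="Who wrote 'On the Origin of Species'?",            # illustrative
    response="Charles Darwin wrote it in 1859.",              # answer being judged
    reference="Charles Darwin published the book in 1859.",   # ground truth
)
print(result.passing, result.score, result.feedback)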
import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.postprocessor import ( FixedRecencyPostprocessor, EmbeddingRecencyPostprocessor, ) from llama_index.core.node_parser import SentenceSplitter from llama_index.core.storage.docstore import SimpleDocumentStore from llama_index.core.response.notebook_utils import display_response from llama_index.core import StorageContext def get_file_metadata(file_name: str): """Get file metadata.""" if "v1" in file_name: return {"date": "2020-01-01"} elif "v2" in file_name: return {"date": "2020-02-03"} elif "v3" in file_name: return {"date": "2022-04-12"} else: raise ValueError("invalid file") documents = SimpleDirectoryReader( input_files=[ "test_versioned_data/paul_graham_essay_v1.txt", "test_versioned_data/paul_graham_essay_v2.txt", "test_versioned_data/paul_graham_essay_v3.txt", ], file_metadata=get_file_metadata, ).load_data() from llama_index.core import Settings Settings.text_splitter = SentenceSplitter(chunk_size=512) nodes = Settings.text_splitter.get_nodes_from_documents(documents) docstore =
SimpleDocumentStore()
llama_index.core.storage.docstore.SimpleDocumentStore
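
A sketch of the usual wiring: register the parsed nodes in the docstore and share it with the index through a StorageContext:

docstore = SimpleDocumentStore()
docstore.add_documents(nodes)
storage_context = StorageContext.from_defaults(docstore=docstore)
index = VectorStoreIndex(nodes, storage_context=storage_context)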
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.core import PromptTemplate text_qa_template_str = ( "Context information is" " below.\n---------------------\n{context_str}\n---------------------\nUsing" " both the context information and also using your own knowledge, answer" " the question: {query_str}\nIf the context isn't helpful, you can also" " answer the question on your own.\n" ) text_qa_template = PromptTemplate(text_qa_template_str) refine_template_str = ( "The original question is as follows: {query_str}\nWe have provided an" " existing answer: {existing_answer}\nWe have the opportunity to refine" " the existing answer (only if needed) with some more context" " below.\n------------\n{context_msg}\n------------\nUsing both the new" " context and your own knowledge, update or repeat the existing answer.\n" ) refine_template = PromptTemplate(refine_template_str) import openai import os os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
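
A plausible continuation that plugs the two templates defined above into a query engine; the query is chosen to need the LLM's own knowledge rather than the essay:

documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(
    text_qa_template=text_qa_template,
    refine_template=refine_template,
    llm=llm,
)
print(query_engine.query("Who is Joe Biden?"))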
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-cohere') get_ipython().system('pip install llama-index') import os cohere_api_key = "YOUR_API_KEY" os.environ["COHERE_API_KEY"] = cohere_api_key from llama_index.embeddings.cohere import CohereEmbedding embed_model = CohereEmbedding( cohere_api_key=cohere_api_key, model_name="embed-english-v3.0", input_type="search_query", ) embeddings = embed_model.get_text_embedding("Hello CohereAI!") print(len(embeddings)) print(embeddings[:5]) embed_model = CohereEmbedding( cohere_api_key=cohere_api_key, model_name="embed-english-v3.0", input_type="search_document", ) embeddings = embed_model.get_text_embedding("Hello CohereAI!") print(len(embeddings)) print(embeddings[:5]) embed_model = CohereEmbedding( cohere_api_key=cohere_api_key, model_name="embed-english-v2.0" ) embeddings = embed_model.get_text_embedding("Hello CohereAI!") print(len(embeddings)) print(embeddings[:5]) import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.litellm import LiteLLM from llama_index.core.response.notebook_utils import display_source_node from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() llm =
LiteLLM("command-nightly")
llama_index.llms.litellm.LiteLLM
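
A short sketch combining the Cohere embeddings configured above with the LiteLLM-wrapped Cohere model; the query is illustrative:

llm = LiteLLM("command-nightly")
index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
query_engine = index.as_query_engine(llm=llm)
print(query_engine.query("What did Paul Graham do growing up?"))  # illustrative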
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.WARNING) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import nest_asyncio nest_asyncio.apply() from llama_index.core import SimpleDirectoryReader, get_response_synthesizer from llama_index.core import DocumentSummaryIndex from llama_index.llms.openai import OpenAI from llama_index.core.node_parser import SentenceSplitter wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"] from pathlib import Path import requests for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) city_docs = [] for wiki_title in wiki_titles: docs = SimpleDirectoryReader( input_files=[f"data/{wiki_title}.txt"] ).load_data() docs[0].doc_id = wiki_title city_docs.extend(docs) chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo") splitter = SentenceSplitter(chunk_size=1024) response_synthesizer = get_response_synthesizer( response_mode="tree_summarize", use_async=True ) doc_summary_index = DocumentSummaryIndex.from_documents( city_docs, llm=chatgpt, transformations=[splitter], response_synthesizer=response_synthesizer, show_progress=True, ) doc_summary_index.get_document_summary("Boston") doc_summary_index.storage_context.persist("index") from llama_index.core import load_index_from_storage from llama_index.core import StorageContext storage_context = StorageContext.from_defaults(persist_dir="index") doc_summary_index =
load_index_from_storage(storage_context)
llama_index.core.load_index_from_storage
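
A plausible continuation once the summary index is reloaded; the response mode mirrors the synthesizer configured above, and the query is illustrative:

doc_summary_index = load_index_from_storage(storage_context)
query_engine = doc_summary_index.as_query_engine(
    response_mode="tree_summarize", use_async=True
)
print(query_engine.query("What are the sports teams in Toronto?"))  # illustrative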
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-4") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"] from pathlib import Path import requests for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) city_docs = {} for wiki_title in wiki_titles: city_docs[wiki_title] = SimpleDirectoryReader( input_files=[f"data/{wiki_title}.txt"] ).load_data() from llama_index.core import VectorStoreIndex from llama_index.agent.openai import OpenAIAgent from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core import VectorStoreIndex tool_dict = {} for wiki_title in wiki_titles: vector_index = VectorStoreIndex.from_documents( city_docs[wiki_title], ) vector_query_engine = vector_index.as_query_engine() vector_tool = QueryEngineTool( query_engine=vector_query_engine, metadata=ToolMetadata( name=wiki_title, description=("Useful for questions related to" f" {wiki_title}"), ), ) tool_dict[wiki_title] = vector_tool from llama_index.core import VectorStoreIndex from llama_index.core.objects import ObjectIndex, SimpleToolNodeMapping tool_mapping = SimpleToolNodeMapping.from_objects(list(tool_dict.values())) tool_index = ObjectIndex.from_objects( list(tool_dict.values()), tool_mapping, VectorStoreIndex, ) tool_retriever = tool_index.as_retriever(similarity_top_k=1) from llama_index.core.llms import ChatMessage from llama_index.core import ChatPromptTemplate from typing import List GEN_SYS_PROMPT_STR = """\ Task information is given below. Given the task, please generate a system prompt for an OpenAI-powered bot to solve this task: {task} \ """ gen_sys_prompt_messages = [ ChatMessage( role="system", content="You are helping to build a system prompt for another bot.", ), ChatMessage(role="user", content=GEN_SYS_PROMPT_STR), ] GEN_SYS_PROMPT_TMPL = ChatPromptTemplate(gen_sys_prompt_messages) agent_cache = {} def create_system_prompt(task: str): """Create system prompt for another agent given an input task.""" llm = OpenAI(model="gpt-4") fmt_messages = GEN_SYS_PROMPT_TMPL.format_messages(task=task) response = llm.chat(fmt_messages) return response.message.content def get_tools(task: str): """Get the set of relevant tools to use given an input task.""" subset_tools = tool_retriever.retrieve(task) return [t.metadata.name for t in subset_tools] def create_agent(system_prompt: str, tool_names: List[str]): """Create an agent given a system prompt and an input set of tools.""" llm = OpenAI(model="gpt-4") try: input_tools = [tool_dict[tn] for tn in tool_names] agent =
OpenAIAgent.from_tools(input_tools, llm=llm, verbose=True)
llama_index.agent.openai.OpenAIAgent.from_tools
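
One plausible completion of the truncated create_agent helper; caching the agent and the error-string fallback are assumptions about the surrounding notebook:

def create_agent(system_prompt: str, tool_names: List[str]):
    """Create an agent given a system prompt and an input set of tools."""
    llm = OpenAI(model="gpt-4")
    try:
        input_tools = [tool_dict[tn] for tn in tool_names]
        agent = OpenAIAgent.from_tools(
            input_tools, llm=llm, system_prompt=system_prompt, verbose=True
        )
        agent_cache["agent"] = agent  # cache so later calls can chat with it
        return "Agent created successfully."
    except Exception as e:
        return f"An error occurred: {e!s}"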
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') import nest_asyncio nest_asyncio.apply() import os HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN") OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") import pandas as pd def display_eval_df(question, source, answer_a, answer_b, result) -> None: """Pretty print question/answer + gpt-4 judgement dataset.""" eval_df = pd.DataFrame( { "Question": question, "Source": source, "Model A": answer_a["model"], "Answer A": answer_a["text"], "Model B": answer_b["model"], "Answer B": answer_b["text"], "Score": result.score, "Judgement": result.feedback, }, index=[0], ) eval_df = eval_df.style.set_properties( **{ "inline-size": "300px", "overflow-wrap": "break-word", }, subset=["Answer A", "Answer B"] ) display(eval_df) get_ipython().system('pip install wikipedia -q') from llama_index.readers.wikipedia import WikipediaReader train_cities = [ "San Francisco", "Toronto", "New York", "Vancouver", "Montreal", "Boston", ] test_cities = [ "Tokyo", "Singapore", "Paris", ] train_documents = WikipediaReader().load_data( pages=[f"History of {x}" for x in train_cities] ) test_documents = WikipediaReader().load_data( pages=[f"History of {x}" for x in test_cities] ) QUESTION_GEN_PROMPT = ( "You are a Teacher/ Professor. Your task is to setup " "a quiz/examination. Using the provided context, formulate " "a single question that captures an important fact from the " "context. Restrict the question to the context information provided." ) from llama_index.core.evaluation import DatasetGenerator from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3) train_dataset_generator = DatasetGenerator.from_documents( train_documents, question_gen_query=QUESTION_GEN_PROMPT, llm=llm, show_progress=True, num_questions_per_chunk=25, ) test_dataset_generator = DatasetGenerator.from_documents( test_documents, question_gen_query=QUESTION_GEN_PROMPT, llm=llm, show_progress=True, num_questions_per_chunk=25, ) train_questions = train_dataset_generator.generate_questions_from_nodes( num=200 ) test_questions = test_dataset_generator.generate_questions_from_nodes(num=150) len(train_questions), len(test_questions) train_questions[:3] test_questions[:3] from llama_index.core import VectorStoreIndex from llama_index.core.retrievers import VectorIndexRetriever train_index = VectorStoreIndex.from_documents(documents=train_documents) train_retriever = VectorIndexRetriever( index=train_index, similarity_top_k=2, ) test_index = VectorStoreIndex.from_documents(documents=test_documents) test_retriever = VectorIndexRetriever( index=test_index, similarity_top_k=2, ) from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.llms.huggingface import HuggingFaceInferenceAPI def create_query_engine( hf_name: str, retriever: VectorIndexRetriever, hf_llm_generators: dict ) -> RetrieverQueryEngine: """Create a RetrieverQueryEngine using the HuggingFaceInferenceAPI LLM""" if hf_name not in hf_llm_generators: raise KeyError("model not listed in hf_llm_generators") llm = HuggingFaceInferenceAPI( model_name=hf_llm_generators[hf_name], context_window=2048, # to use refine token=HUGGING_FACE_TOKEN, ) return
RetrieverQueryEngine.from_args(retriever=retriever, llm=llm)
llama_index.core.query_engine.RetrieverQueryEngine.from_args
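
A sketch of how the helper might be driven; the Hugging Face model IDs are examples, not prescribed by the notebook:

hf_llm_generators = {
    "mistral-7b-instruct": "mistralai/Mistral-7B-Instruct-v0.1",  # example model ID
    "llama2-7b-chat": "meta-llama/Llama-2-7b-chat-hf",            # example model ID
}
test_query_engines = {
    name: create_query_engine(name, test_retriever, hf_llm_generators)
    for name in hf_llm_generators
}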
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.core import Document, VectorStoreIndex from llama_index.readers.file import PyMuPDFReader from llama_index.core.node_parser import SimpleNodeParser from llama_index.llms.openai import OpenAI loader =
PyMuPDFReader()
llama_index.readers.file.PyMuPDFReader
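
A compact continuation using the imports above; the 1024-token chunk size is an assumption:

loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
node_parser = SimpleNodeParser.from_defaults(chunk_size=1024)  # chunk size assumed
base_nodes = node_parser.get_nodes_from_documents(docs)
index = VectorStoreIndex(base_nodes)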
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from IPython.display import Markdown, display import os os.environ["OPENAI_API_KEY"] = "sk-..." documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(documents, chunk_size=512) from llama_index.core.output_parsers import LangchainOutputParser from langchain.output_parsers import StructuredOutputParser, ResponseSchema response_schemas = [ ResponseSchema( name="Education", description=( "Describes the author's educational experience/background." ), ), ResponseSchema( name="Work", description="Describes the author's work experience/background.", ), ] lc_output_parser = StructuredOutputParser.from_response_schemas( response_schemas ) output_parser =
LangchainOutputParser(lc_output_parser)
llama_index.core.output_parsers.LangchainOutputParser
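
A plausible continuation: attach the parser to the LLM so answers come back as JSON matching the two response schemas; passing output_parser to the LLM constructor follows the structured-output examples:

output_parser = LangchainOutputParser(lc_output_parser)
from llama_index.llms.openai import OpenAI
llm = OpenAI(output_parser=output_parser)
query_engine = index.as_query_engine(llm=llm)
print(query_engine.query("What are a few things the author did growing up?"))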
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass import openai openai.api_key = "sk-" import chromadb chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection("quickstart") from llama_index.core import VectorStoreIndex from llama_index.vector_stores.chroma import ChromaVectorStore from IPython.display import Markdown, display from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", "year": 1994, }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", "year": 1972, }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", "theme": "Fiction", "year": 2010, }, ), TextNode( text="To Kill a Mockingbird", metadata={ "author": "Harper Lee", "theme": "Mafia", "year": 1960, }, ), TextNode( text="1984", metadata={ "author": "George Orwell", "theme": "Totalitarianism", "year": 1949, }, ), TextNode( text="The Great Gatsby", metadata={ "author": "F. Scott Fitzgerald", "theme": "The American Dream", "year": 1925, }, ), TextNode( text="Harry Potter and the Sorcerer's Stone", metadata={ "author": "J.K. Rowling", "theme": "Fiction", "year": 1997, }, ), ] from llama_index.core import StorageContext vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.vector_stores import ( MetadataFilter, MetadataFilters, FilterOperator, ) filters = MetadataFilters( filters=[ MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia"), ] ) retriever = index.as_retriever(filters=filters) retriever.retrieve("What is inception about?") from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters filters = MetadataFilters( filters=[ MetadataFilter(key="theme", value="Mafia"), MetadataFilter(key="year", value=1972), ] ) retriever = index.as_retriever(filters=filters) retriever.retrieve("What is inception about?") from llama_index.core.vector_stores import FilterOperator, FilterCondition filters = MetadataFilters( filters=[ MetadataFilter(key="theme", value="Fiction"), MetadataFilter(key="year", value=1997, operator=FilterOperator.GT), ], condition=FilterCondition.AND, ) retriever = index.as_retriever(filters=filters) retriever.retrieve("Harry Potter?") from llama_index.core.vector_stores import FilterOperator, FilterCondition filters = MetadataFilters( filters=[
MetadataFilter(key="theme", value="Fiction")
llama_index.core.vector_stores.MetadataFilter
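
A plausible completion of the truncated cell, mirroring the AND example above but with an OR condition:

filters = MetadataFilters(
    filters=[
        MetadataFilter(key="theme", value="Fiction"),
        MetadataFilter(key="year", value=1997, operator=FilterOperator.GT),
    ],
    condition=FilterCondition.OR,
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("Harry Potter?")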
get_ipython().run_line_magic('pip', 'install llama-index-llms-vertex') from llama_index.llms.vertex import Vertex from google.oauth2 import service_account filename = "vertex-407108-37495ce6c303.json" credentials: service_account.Credentials = ( service_account.Credentials.from_service_account_file(filename) ) Vertex( model="text-bison", project=credentials.project_id, credentials=credentials ) from llama_index.llms.vertex import Vertex from llama_index.core.llms import ChatMessage, MessageRole llm = Vertex(model="text-bison", temperature=0, additional_kwargs={}) llm.complete("Hello this is a sample text").text (await llm.acomplete("hello")).text list(llm.stream_complete("hello"))[-1].text chat = Vertex(model="chat-bison") messages = [
ChatMessage(role=MessageRole.SYSTEM, content="Reply everything in french")
llama_index.core.llms.ChatMessage
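
A plausible completion of the chat example; the user turn is illustrative:

messages = [
    ChatMessage(role=MessageRole.SYSTEM, content="Reply everything in french"),
    ChatMessage(role=MessageRole.USER, content="Hello"),  # illustrative user turn
]
chat_response = chat.chat(messages)
print(chat_response.message.content)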
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass import openai openai.api_key = "sk-" import chromadb chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection("quickstart") from llama_index.core import VectorStoreIndex from llama_index.vector_stores.chroma import ChromaVectorStore from IPython.display import Markdown, display from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", "year": 1994, }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", "year": 1972, }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", "theme": "Fiction", "year": 2010, }, ), TextNode( text="To Kill a Mockingbird", metadata={ "author": "Harper Lee", "theme": "Mafia", "year": 1960, }, ), TextNode( text="1984", metadata={ "author": "George Orwell", "theme": "Totalitarianism", "year": 1949, }, ), TextNode( text="The Great Gatsby", metadata={ "author": "F. Scott Fitzgerald", "theme": "The American Dream", "year": 1925, }, ), TextNode( text="Harry Potter and the Sorcerer's Stone", metadata={ "author": "J.K. Rowling", "theme": "Fiction", "year": 1997, }, ), ] from llama_index.core import StorageContext vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.vector_stores import ( MetadataFilter, MetadataFilters, FilterOperator, ) filters = MetadataFilters( filters=[ MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia"), ] ) retriever = index.as_retriever(filters=filters) retriever.retrieve("What is inception about?") from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters filters = MetadataFilters( filters=[ MetadataFilter(key="theme", value="Mafia"),
MetadataFilter(key="year", value=1972)
llama_index.core.vector_stores.MetadataFilter
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().system('pip install llama-index') import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west4-gcp-free") import os import getpass import openai openai.api_key = "sk-<your-key>" try: pinecone.create_index( "quickstart-index", dimension=1536, metric="euclidean", pod_type="p1" ) except Exception: pass pinecone_index = pinecone.Index("quickstart-index") pinecone_index.delete(deleteAll=True, namespace="test") from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "Michael Jordan is a retired professional basketball player," " widely regarded as one of the greatest basketball players of all" " time." ), metadata={ "category": "Sports", "country": "United States", "gender": "male", "born": 1963, }, ), TextNode( text=( "Angelina Jolie is an American actress, filmmaker, and" " humanitarian. She has received numerous awards for her acting" " and is known for her philanthropic work." ), metadata={ "category": "Entertainment", "country": "United States", "gender": "female", "born": 1975, }, ), TextNode( text=( "Elon Musk is a business magnate, industrial designer, and" " engineer. He is the founder, CEO, and lead designer of SpaceX," " Tesla, Inc., Neuralink, and The Boring Company." ), metadata={ "category": "Business", "country": "United States", "gender": "male", "born": 1971, }, ), TextNode( text=( "Rihanna is a Barbadian singer, actress, and businesswoman. She" " has achieved significant success in the music industry and is" " known for her versatile musical style." ), metadata={ "category": "Music", "country": "Barbados", "gender": "female", "born": 1988, }, ), TextNode( text=( "Cristiano Ronaldo is a Portuguese professional footballer who is" " considered one of the greatest football players of all time. He" " has won numerous awards and set multiple records during his" " career." 
), metadata={ "category": "Sports", "country": "Portugal", "gender": "male", "born": 1985, }, ), ] vector_store = PineconeVectorStore( pinecone_index=pinecone_index, namespace="test" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.tools import FunctionTool from llama_index.core.vector_stores import ( VectorStoreInfo, MetadataInfo, MetadataFilter, MetadataFilters, FilterCondition, FilterOperator, ) from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core.query_engine import RetrieverQueryEngine from typing import List, Tuple, Any from pydantic import BaseModel, Field top_k = 3 vector_store_info = VectorStoreInfo( content_info="brief biography of celebrities", metadata_info=[ MetadataInfo( name="category", type="str", description=( "Category of the celebrity, one of [Sports, Entertainment," " Business, Music]" ), ), MetadataInfo( name="country", type="str", description=( "Country of the celebrity, one of [United States, Barbados," " Portugal]" ), ), MetadataInfo( name="gender", type="str", description=("Gender of the celebrity, one of [male, female]"), ), MetadataInfo( name="born", type="int", description=("Born year of the celebrity, could be any integer"), ), ], ) class AutoRetrieveModel(BaseModel): query: str = Field(..., description="natural language query string") filter_key_list: List[str] = Field( ..., description="List of metadata filter field names" ) filter_value_list: List[Any] = Field( ..., description=( "List of metadata filter field values (corresponding to names" " specified in filter_key_list)" ), ) filter_operator_list: List[str] = Field( ..., description=( "Metadata filters conditions (could be one of <, <=, >, >=, ==, !=)" ), ) filter_condition: str = Field( ..., description=("Metadata filters condition values (could be AND or OR)"), ) description = f"""\ Use this tool to look up biographical information about celebrities. The vector database schema is given below: {vector_store_info.json()} """ def auto_retrieve_fn( query: str, filter_key_list: List[str], filter_value_list: List[any], filter_operator_list: List[str], filter_condition: str, ): """Auto retrieval function. Performs auto-retrieval from a vector database, and then applies a set of filters. """ query = query or "Query" metadata_filters = [ MetadataFilter(key=k, value=v, operator=op) for k, v, op in zip( filter_key_list, filter_value_list, filter_operator_list ) ] retriever = VectorIndexRetriever( index, filters=MetadataFilters( filters=metadata_filters, condition=filter_condition ), top_k=top_k, ) query_engine = RetrieverQueryEngine.from_args(retriever) response = query_engine.query(query) return str(response) auto_retrieve_tool = FunctionTool.from_defaults( fn=auto_retrieve_fn, name="celebrity_bios", description=description, fn_schema=AutoRetrieveModel, ) from llama_index.agent.openai import OpenAIAgent from llama_index.llms.openai import OpenAI agent = OpenAIAgent.from_tools( [auto_retrieve_tool], llm=OpenAI(temperature=0, model="gpt-4-0613"), verbose=True, ) response = agent.chat("Tell me about two celebrities from the United States. ") print(str(response)) response = agent.chat("Tell me about two celebrities born after 1980. ") print(str(response)) response = agent.chat( "Tell me about few celebrities under category business and born after 1950. 
" ) print(str(response)) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) from llama_index.core import SQLDatabase from llama_index.core.indices import SQLStructStoreIndex engine = create_engine("sqlite:///:memory:", future=True) metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, {"city_name": "Berlin", "population": 3645000, "country": "Germany"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from llama_index.core.query_engine import NLSQLTableQueryEngine query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], ) get_ipython().system('pip install wikipedia') from llama_index.readers.wikipedia import WikipediaReader from llama_index.core import SimpleDirectoryReader, VectorStoreIndex cities = ["Toronto", "Berlin", "Tokyo"] wiki_docs = WikipediaReader().load_data(pages=cities) import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp") pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True) from llama_index.core import Settings from llama_index.core import StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core.node_parser import TokenTextSplitter from llama_index.llms.openai import OpenAI Settings.llm = OpenAI(temperature=0, model="gpt-4") Settings.node_parser = TokenTextSplitter(chunk_size=1024) vector_store = PineconeVectorStore( pinecone_index=pinecone_index, namespace="wiki_cities" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) vector_index = VectorStoreIndex([], storage_context=storage_context) for city, wiki_doc in zip(cities, wiki_docs): nodes =
Settings.node_parser.get_nodes_from_documents([wiki_doc])
llama_index.core.Settings.node_parser.get_nodes_from_documents
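
A plausible completion of the truncated loop, tagging each node with its city before inserting into the Pinecone-backed index; the metadata key is an assumption:

for city, wiki_doc in zip(cities, wiki_docs):
    nodes = Settings.node_parser.get_nodes_from_documents([wiki_doc])
    for node in nodes:
        node.metadata = {"title": city}  # assumed metadata key
    vector_index.insert_nodes(nodes)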
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-colbert') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google') get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google') get_ipython().run_line_magic('pip', 'install llama-index') get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"') get_ipython().run_line_magic('pip', 'install torch sentence-transformers') get_ipython().run_line_magic('pip', 'install google-auth-oauthlib') from google.oauth2 import service_account from llama_index.indices.managed.google import GoogleIndex from llama_index.vector_stores.google import set_google_config credentials = service_account.Credentials.from_service_account_file( "service_account_key.json", scopes=[ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/generative-language.retriever", ], ) set_google_config(auth_credentials=credentials) project_name = "TODO-your-project-name" # @param {type:"string"} email = "[email protected]" # @param {type:"string"} client_file_name = "client_secret.json" get_ipython().system('gcloud config set project $project_name') get_ipython().system('gcloud config set account $email') get_ipython().system('gcloud auth application-default login --no-browser --client-id-file=$client_file_name --scopes="https://www.googleapis.com/auth/generative-language.retriever,https://www.googleapis.com/auth/cloud-platform"') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import os GOOGLE_API_KEY = "" # add your GOOGLE API key here os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY from llama_index.core import SimpleDirectoryReader from llama_index.indices.managed.google import GoogleIndex google_index = GoogleIndex.create_corpus(display_name="My first corpus!") print(f"Newly created corpus ID is {google_index.corpus_id}.") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() google_index.insert_documents(documents) google_index = GoogleIndex.from_corpus(corpus_id="") query_engine = google_index.as_query_engine() response = query_engine.query("which program did this author attend?") print(response) from llama_index.core.response.notebook_utils import display_source_node for r in response.source_nodes: display_source_node(r, source_length=1000) from google.ai.generativelanguage import ( GenerateAnswerRequest, ) query_engine = google_index.as_query_engine( temperature=0.3, answer_style=GenerateAnswerRequest.AnswerStyle.VERBOSE, ) response = query_engine.query("Which program did this author attend?") print(response) from llama_index.core.response.notebook_utils import display_source_node for r in response.source_nodes:
display_source_node(r, source_length=1000)
llama_index.core.response.notebook_utils.display_source_node
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-together') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') domain = "docs.llamaindex.ai" docs_url = "https://docs.llamaindex.ai/en/latest/" get_ipython().system('wget -e robots=off --recursive --no-clobber --page-requisites --html-extension --convert-links --restrict-file-names=windows --domains {domain} --no-parent {docs_url}') from llama_index.readers.file import UnstructuredReader from pathlib import Path from llama_index.llms.openai import OpenAI from llama_index.core import Document reader = UnstructuredReader() all_html_files = [ "docs.llamaindex.ai/en/latest/index.html", "docs.llamaindex.ai/en/latest/contributing/contributing.html", "docs.llamaindex.ai/en/latest/understanding/understanding.html", "docs.llamaindex.ai/en/latest/understanding/using_llms/using_llms.html", "docs.llamaindex.ai/en/latest/understanding/using_llms/privacy.html", "docs.llamaindex.ai/en/latest/understanding/loading/llamahub.html", "docs.llamaindex.ai/en/latest/optimizing/production_rag.html", "docs.llamaindex.ai/en/latest/module_guides/models/llms.html", ] doc_limit = 10 docs = [] for idx, f in enumerate(all_html_files): if idx > doc_limit: break print(f"Idx {idx}/{len(all_html_files)}") loaded_docs = reader.load_data(file=f, split_documents=True) start_idx = 64 loaded_doc = Document( id_=str(f), text="\n\n".join([d.get_content() for d in loaded_docs[start_idx:]]), metadata={"path": str(f)}, ) print(str(f)) docs.append(loaded_doc) from llama_index.embeddings.together import TogetherEmbedding from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI api_key = "<api_key>" embed_model = TogetherEmbedding( model_name="togethercomputer/m2-bert-80M-32k-retrieval", api_key=api_key ) llm = OpenAI(temperature=0, model="gpt-3.5-turbo") from llama_index.core.storage.docstore import SimpleDocumentStore for doc in docs: embedding = embed_model.get_text_embedding(doc.get_content()) doc.embedding = embedding docstore =
SimpleDocumentStore()
llama_index.core.storage.docstore.SimpleDocumentStore
get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, ) from llama_index.llms.openai import OpenAI from llama_index.core.tools import QueryEngineTool, ToolMetadata llm_35 = OpenAI(model="gpt-3.5-turbo-0613", temperature=0.3) llm_4 = OpenAI(model="gpt-4-0613", temperature=0.3) try: storage_context = StorageContext.from_defaults( persist_dir="./storage/march" ) march_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/june" ) june_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/sept" ) sept_index = load_index_from_storage(storage_context) index_loaded = True except: index_loaded = False if not index_loaded: march_docs = SimpleDirectoryReader( input_files=["../../data/10q/uber_10q_march_2022.pdf"] ).load_data() june_docs = SimpleDirectoryReader( input_files=["../../data/10q/uber_10q_june_2022.pdf"] ).load_data() sept_docs = SimpleDirectoryReader( input_files=["../../data/10q/uber_10q_sept_2022.pdf"] ).load_data() march_index = VectorStoreIndex.from_documents( march_docs, ) june_index = VectorStoreIndex.from_documents( june_docs, ) sept_index = VectorStoreIndex.from_documents( sept_docs, ) march_index.storage_context.persist(persist_dir="./storage/march") june_index.storage_context.persist(persist_dir="./storage/june") sept_index.storage_context.persist(persist_dir="./storage/sept") march_engine = march_index.as_query_engine(similarity_top_k=3, llm=llm_35) june_engine = june_index.as_query_engine(similarity_top_k=3, llm=llm_35) sept_engine = sept_index.as_query_engine(similarity_top_k=3, llm=llm_35) from llama_index.core.tools import QueryEngineTool query_tool_sept = QueryEngineTool.from_defaults( query_engine=sept_engine, name="sept_2022", description=( f"Provides information about Uber quarterly financials ending" f" September 2022" ), ) query_tool_june = QueryEngineTool.from_defaults( query_engine=june_engine, name="june_2022", description=( f"Provides information about Uber quarterly financials ending June" f" 2022" ), ) query_tool_march = QueryEngineTool.from_defaults( query_engine=march_engine, name="march_2022", description=( f"Provides information about Uber quarterly financials ending March" f" 2022" ), ) query_engine_tools = [query_tool_march, query_tool_june, query_tool_sept] from llama_index.core.agent import ReActAgent from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-0613") base_agent = ReActAgent.from_tools(query_engine_tools, llm=llm, verbose=True) response = base_agent.chat( "Analyze Uber revenue growth over the last few quarters" ) print(str(response)) response = base_agent.chat( "Can you tell me about the risk factors in the quarter with the highest" " revenue growth?" ) print(str(response)) from llama_index.core.evaluation import DatasetGenerator base_question_gen_query = ( "You are a Teacher/ Professor. Your task is to setup a quiz/examination." " Using the provided context from the Uber March 10Q filing, formulate a" " single question that captures an important fact from the" " context. Restrict the question to the context information provided." 
) dataset_generator = DatasetGenerator.from_documents( march_docs, question_gen_query=base_question_gen_query, llm=llm_35, ) questions = dataset_generator.generate_questions_from_nodes(num=20) questions from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate vary_question_tmpl = """\ You are a financial assistant. Given a question over a 2023 Uber 10Q filing, your goal is to generate up to {num_vary} variations of that question that might span multiple 10Q's. This can include compare/contrasting different 10Qs, replacing the current quarter with another quarter, or generating questions that can only be answered over multiple quarters (be creative!) You are given a valid set of 10Q filings. Please only generate question variations that can be answered in that set. For example: Base Question: What was the free cash flow of Uber in March 2023? Valid 10Qs: [March 2023, June 2023, September 2023] Question Variations: What was the free cash flow of Uber in June 2023? Can you compare/contrast the free cash flow of Uber in June/September 2023 and offer explanations for the change? Did the free cash flow of Uber increase or decrease in 2023? Now let's give it a shot! Base Question: {base_question} Valid 10Qs: {valid_10qs} Question Variations: """ def gen_question_variations(base_questions, num_vary=3): """Generate question variations.""" VALID_10Q_STR = "[March 2022, June 2022, September 2022]" llm =
OpenAI(model="gpt-4")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-evaluation-tonic-validate') import json import pandas as pd from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.evaluation.tonic_validate import ( AnswerConsistencyEvaluator, AnswerSimilarityEvaluator, AugmentationAccuracyEvaluator, AugmentationPrecisionEvaluator, RetrievalPrecisionEvaluator, TonicValidateEvaluator, ) question = "What makes Sam Altman a good founder?" reference_answer = "He is smart and has a great force of will." llm_answer = "He is a good founder because he is smart." retrieved_context_list = [ "Sam Altman is a good founder. He is very smart.", "What makes Sam Altman such a good founder is his great force of will.", ] answer_similarity_evaluator = AnswerSimilarityEvaluator() score = await answer_similarity_evaluator.aevaluate( question, llm_answer, retrieved_context_list, reference_response=reference_answer, ) score answer_consistency_evaluator = AnswerConsistencyEvaluator() score = await answer_consistency_evaluator.aevaluate( question, llm_answer, retrieved_context_list ) score augmentation_accuracy_evaluator = AugmentationAccuracyEvaluator() score = await augmentation_accuracy_evaluator.aevaluate( question, llm_answer, retrieved_context_list ) score augmentation_precision_evaluator = AugmentationPrecisionEvaluator() score = await augmentation_precision_evaluator.aevaluate( question, llm_answer, retrieved_context_list ) score retrieval_precision_evaluator = RetrievalPrecisionEvaluator() score = await retrieval_precision_evaluator.aevaluate( question, llm_answer, retrieved_context_list ) score tonic_validate_evaluator = TonicValidateEvaluator() scores = await tonic_validate_evaluator.aevaluate( question, llm_answer, retrieved_context_list, reference_response=reference_answer, ) scores.score_dict tonic_validate_evaluator = TonicValidateEvaluator() scores = await tonic_validate_evaluator.aevaluate_run( [question], [llm_answer], [retrieved_context_list], [reference_answer] ) scores.run_data[0].scores get_ipython().system('llamaindex-cli download-llamadataset EvaluatingLlmSurveyPaperDataset --download-dir ./data') from llama_index.core import SimpleDirectoryReader from llama_index.core.llama_dataset import LabelledRagDataset from llama_index.core import VectorStoreIndex rag_dataset =
LabelledRagDataset.from_json("./data/rag_dataset.json")
llama_index.core.llama_dataset.LabelledRagDataset.from_json
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index ipywidgets') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from IPython.display import Markdown, display import torch from llama_index.llms.huggingface import HuggingFaceLLM from llama_index.core import PromptTemplate LLAMA2_7B = "meta-llama/Llama-2-7b-hf" LLAMA2_7B_CHAT = "meta-llama/Llama-2-7b-chat-hf" LLAMA2_13B = "meta-llama/Llama-2-13b-hf" LLAMA2_13B_CHAT = "meta-llama/Llama-2-13b-chat-hf" LLAMA2_70B = "meta-llama/Llama-2-70b-hf" LLAMA2_70B_CHAT = "meta-llama/Llama-2-70b-chat-hf" selected_model = LLAMA2_13B_CHAT SYSTEM_PROMPT = """You are an AI assistant that answers questions in a friendly manner, based on the given source documents. Here are some rules you always follow: - Generate human readable output, avoid creating output with gibberish text. - Generate only the requested output, don't include any other language before or after the requested output. - Never say thank you, that you are happy to help, that you are an AI agent, etc. Just answer directly. - Generate professional language typically used in business documents in North America. - Never generate offensive or foul language. """ query_wrapper_prompt = PromptTemplate( "[INST]<<SYS>>\n" + SYSTEM_PROMPT + "<</SYS>>\n\n{query_str}[/INST] " ) llm = HuggingFaceLLM( context_window=4096, max_new_tokens=2048, generate_kwargs={"temperature": 0.0, "do_sample": False}, query_wrapper_prompt=query_wrapper_prompt, tokenizer_name=selected_model, model_name=selected_model, device_map="auto", model_kwargs={"torch_dtype": torch.float16, "load_in_8bit": True}, ) from llama_index.embeddings.huggingface import HuggingFaceEmbedding embed_model =
HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
llama_index.embeddings.huggingface.HuggingFaceEmbedding
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from IPython.display import Markdown, display def display_prompt_dict(prompts_dict): for k, p in prompts_dict.items(): text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>" display(Markdown(text_md)) print(p.get_template()) display(Markdown("<br><br>")) from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) selector = LLMMultiSelector.from_defaults() from llama_index.core.tools import ToolMetadata tool_choices = [ ToolMetadata( name="covid_nyt", description=("This tool contains a NYT news article about COVID-19"), ), ToolMetadata( name="covid_wiki", description=("This tool contains the Wikipedia page about COVID-19"), ), ToolMetadata( name="covid_tesla", description=("This tool contains the Wikipedia page about apples"), ), ] display_prompt_dict(selector.get_prompts()) selector_result = selector.select( tool_choices, query="Tell me more about COVID-19" ) selector_result.selections from llama_index.core import PromptTemplate from llama_index.llms.openai import OpenAI query_gen_str = """\ You are a helpful assistant that generates multiple search queries based on a \ single input query. Generate {num_queries} search queries, one on each line, \ related to the following input query: Query: {query} Queries: """ query_gen_prompt =
PromptTemplate(query_gen_str)
llama_index.core.PromptTemplate
get_ipython().run_line_magic('pip', 'install llama-index-readers-github') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() import os os.environ["GITHUB_TOKEN"] = "" import os from llama_index.readers.github import GitHubRepositoryIssuesReader, GitHubIssuesClient github_client = GitHubIssuesClient() loader = GitHubRepositoryIssuesReader( github_client, owner="run-llama", repo="llama_index", verbose=True, ) orig_docs = loader.load_data() limit = 100 docs = [] for idx, doc in enumerate(orig_docs): doc.metadata["index_id"] = doc.id_ if idx >= limit: break docs.append(doc) from copy import deepcopy import asyncio from tqdm.asyncio import tqdm_asyncio from llama_index.core.indices import SummaryIndex from llama_index.core import Document, ServiceContext from llama_index.llms.openai import OpenAI from llama_index.core.async_utils import run_jobs async def aprocess_doc(doc, include_summary: bool = True): """Process doc.""" print(f"Processing {doc.id_}") metadata = doc.metadata date_tokens = metadata["created_at"].split("T")[0].split("-") year = int(date_tokens[0]) month = int(date_tokens[1]) day = int(date_tokens[2]) assignee = "" if "assignee" not in doc.metadata else doc.metadata["assignee"] size = "" if len(doc.metadata["labels"]) > 0: size_arr = [l for l in doc.metadata["labels"] if "size:" in l] size = size_arr[0].split(":")[1] if len(size_arr) > 0 else "" new_metadata = { "state": metadata["state"], "year": year, "month": month, "day": day, "assignee": assignee, "size": size, "index_id": doc.id_, } summary_index = SummaryIndex.from_documents([doc]) query_str = "Give a one-sentence concise summary of this issue." query_engine = summary_index.as_query_engine( service_context=ServiceContext.from_defaults(llm=OpenAI(model="gpt-3.5-turbo")) ) summary_txt = str(query_engine.query(query_str)) new_doc = Document(text=summary_txt, metadata=new_metadata) return new_doc async def aprocess_docs(docs): """Process metadata on docs.""" new_docs = [] tasks = [] for doc in docs: task = aprocess_doc(doc) tasks.append(task) new_docs = await
run_jobs(tasks, show_progress=True, workers=5)
llama_index.core.async_utils.run_jobs
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index') import pandas as pd pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) pd.set_option("display.width", None) pd.set_option("display.max_colwidth", None) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm') from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.readers.file import FlatReader from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter from llama_index.core.ingestion import IngestionPipeline from pathlib import Path import nest_asyncio nest_asyncio.apply() reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) pipeline = IngestionPipeline( documents=docs, transformations=[ HTMLNodeParser.from_defaults(), SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding()
llama_index.embeddings.openai.OpenAIEmbedding
get_ipython().run_line_magic('pip', 'install -q llama-index-vector-stores-chroma llama-index-llms-fireworks llama-index-embeddings-fireworks==0.1.2') get_ipython().run_line_magic('pip', 'install -q llama-index') get_ipython().system('pip install llama-index chromadb --quiet') get_ipython().system('pip install -q chromadb') get_ipython().system('pip install -q pydantic==1.10.11') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.chroma import ChromaVectorStore from llama_index.core import StorageContext from llama_index.embeddings.fireworks import FireworksEmbedding from llama_index.llms.fireworks import Fireworks from IPython.display import Markdown, display import chromadb import getpass fw_api_key = getpass.getpass("Fireworks API Key:") get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.llms.fireworks import Fireworks from llama_index.embeddings.fireworks import FireworksEmbedding llm = Fireworks( temperature=0, model="accounts/fireworks/models/mixtral-8x7b-instruct" ) chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection("quickstart") embed_model = FireworksEmbedding( model_name="nomic-ai/nomic-embed-text-v1.5", ) documents = SimpleDirectoryReader("./data/paul_graham/").load_data() vector_store =
ChromaVectorStore(chroma_collection=chroma_collection)
llama_index.vector_stores.chroma.ChromaVectorStore
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pgvecto-rs') get_ipython().run_line_magic('pip', 'install llama-index "pgvecto_rs[sdk]"') get_ipython().system('docker run --name pgvecto-rs-demo -e POSTGRES_PASSWORD=mysecretpassword -p 5432:5432 -d tensorchord/pgvecto-rs:latest') import logging import os import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from pgvecto_rs.sdk import PGVectoRs URL = "postgresql+psycopg://{username}:{password}@{host}:{port}/{db_name}".format( port=os.getenv("DB_PORT", "5432"), host=os.getenv("DB_HOST", "localhost"), username=os.getenv("DB_USER", "postgres"), password=os.getenv("DB_PASS", "mysecretpassword"), db_name=os.getenv("DB_NAME", "postgres"), ) client = PGVectoRs( db_url=URL, collection_name="example", dimension=1536, # Using OpenAI’s text-embedding-ada-002 ) import os os.environ["OPENAI_API_KEY"] = "sk-..." from IPython.display import Markdown, display from llama_index.core import SimpleDirectoryReader, VectorStoreIndex from llama_index.vector_stores.pgvecto_rs import PGVectoRsStore get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham").load_data() from llama_index.core import StorageContext vector_store =
PGVectoRsStore(client=client)
llama_index.vector_stores.pgvecto_rs.PGVectoRsStore
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index') import pandas as pd pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) pd.set_option("display.width", None) pd.set_option("display.max_colwidth", None) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm') from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.readers.file import FlatReader from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter from llama_index.core.ingestion import IngestionPipeline from pathlib import Path import nest_asyncio nest_asyncio.apply() reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) pipeline = IngestionPipeline( documents=docs, transformations=[ HTMLNodeParser.from_defaults(), SentenceSplitter(chunk_size=1024, chunk_overlap=200), OpenAIEmbedding(), ], ) eval_nodes = pipeline.run(documents=docs) eval_llm = OpenAI(model="gpt-3.5-turbo") dataset_generator = DatasetGenerator( eval_nodes[:100], llm=eval_llm, show_progress=True, num_questions_per_chunk=3, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100) len(eval_dataset.qr_pairs) eval_dataset.save_json("data/tesla10k_eval_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/tesla10k_eval_dataset.json" ) eval_qs = eval_dataset.questions qr_pairs = eval_dataset.qr_pairs ref_response_strs = [r for (_, r) in qr_pairs] from llama_index.core.evaluation import ( CorrectnessEvaluator, SemanticSimilarityEvaluator, ) from llama_index.core.evaluation.eval_utils import ( get_responses, get_results_df, ) from llama_index.core.evaluation import BatchEvalRunner evaluator_c = CorrectnessEvaluator(llm=eval_llm) evaluator_s = SemanticSimilarityEvaluator(llm=eval_llm) evaluator_dict = { "correctness": evaluator_c, "semantic_similarity": evaluator_s, } batch_eval_runner = BatchEvalRunner( evaluator_dict, workers=2, show_progress=True ) from llama_index.core import VectorStoreIndex async def run_evals( pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref ): nodes = pipeline.run(documents=docs) vector_index =
VectorStoreIndex(nodes)
llama_index.core.VectorStoreIndex
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader =
PyMuPDFReader()
llama_index.readers.file.PyMuPDFReader
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-myscale') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from os import environ import clickhouse_connect environ["OPENAI_API_KEY"] = "sk-*" client = clickhouse_connect.get_client( host="YOUR_CLUSTER_HOST", port=8443, username="YOUR_USERNAME", password="YOUR_CLUSTER_PASSWORD", ) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.myscale import MyScaleVectorStore from IPython.display import Markdown, display documents = SimpleDirectoryReader("../data/paul_graham").load_data() print("Document ID:", documents[0].doc_id) print("Number of Documents: ", len(documents)) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") loader = SimpleDirectoryReader("./data/paul_graham/") documents = loader.load_data() for file in loader.input_files: print(file) from llama_index.core import StorageContext for document in documents: document.metadata = {"user_id": "123", "favorite_color": "blue"} vector_store =
MyScaleVectorStore(myscale_client=client)
llama_index.vector_stores.myscale.MyScaleVectorStore
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().system('pip install openai matplotlib') import os OPENAI_API_TOKEN = "sk-" # Your OpenAI API token here os.environ["OPENAI_API_TOKEN"] = OPENAI_API_TOKEN from llama_index.multi_modal_llms.openai import OpenAIMultiModal from llama_index.core.multi_modal_llms.generic_utils import load_image_urls image_urls = [ "https://res.cloudinary.com/hello-tickets/image/upload/c_limit,f_auto,q_auto,w_1920/v1640835927/o3pfl41q7m5bj8jardk0.jpg", ] image_documents = load_image_urls(image_urls) openai_mm_llm = OpenAIMultiModal( model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=300 ) from PIL import Image import requests from io import BytesIO import matplotlib.pyplot as plt img_response = requests.get(image_urls[0]) print(image_urls[0]) img = Image.open(BytesIO(img_response.content)) plt.imshow(img) complete_response = openai_mm_llm.complete( prompt="Describe the images as an alternative text", image_documents=image_documents, ) print(complete_response) stream_complete_response = openai_mm_llm.stream_complete( prompt="give me more context for this image", image_documents=image_documents, ) for r in stream_complete_response: print(r.delta, end="") from llama_index.core.multi_modal_llms.openai_utils import ( generate_openai_multi_modal_chat_message, ) chat_msg_1 = generate_openai_multi_modal_chat_message( prompt="Describe the images as an alternative text", role="user", image_documents=image_documents, ) chat_msg_2 = generate_openai_multi_modal_chat_message( prompt="The image is a graph showing the surge in US mortgage rates. It is a visual representation of data, with a title at the top and labels for the x and y-axes. Unfortunately, without seeing the image, I cannot provide specific details about the data or the exact design of the graph.", role="assistant", ) chat_msg_3 = generate_openai_multi_modal_chat_message( prompt="can I know more?", role="user", ) chat_messages = [chat_msg_1, chat_msg_2, chat_msg_3] chat_response = openai_mm_llm.chat( messages=chat_messages, ) for msg in chat_messages: print(msg.role, msg.content) print(chat_response) stream_chat_response = openai_mm_llm.stream_chat( messages=chat_messages, ) for r in stream_chat_response: print(r.delta, end="") response_acomplete = await openai_mm_llm.acomplete( prompt="Describe the images as an alternative text", image_documents=image_documents, ) print(response_acomplete) response_astream_complete = await openai_mm_llm.astream_complete( prompt="Describe the images as an alternative text", image_documents=image_documents, ) async for delta in response_astream_complete: print(delta.delta, end="") achat_response = await openai_mm_llm.achat( messages=chat_messages, ) print(achat_response) astream_chat_response = await openai_mm_llm.astream_chat( messages=chat_messages, ) async for delta in astream_chat_response: print(delta.delta, end="") image_urls = [ "https://www.visualcapitalist.com/wp-content/uploads/2023/10/US_Mortgage_Rate_Surge-Sept-11-1.jpg", "https://www.sportsnet.ca/wp-content/uploads/2023/11/CP1688996471-1040x572.jpg", ] image_documents_1 = load_image_urls(image_urls) response_multi = openai_mm_llm.complete( prompt="is there any relationship between those images?", image_documents=image_documents_1, ) print(response_multi) from llama_index.core import SimpleDirectoryReader image_documents =
SimpleDirectoryReader("./images_wiki")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.llms.openai import OpenAI resp = OpenAI().complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.openai import OpenAI messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="What is your name"), ] resp =
OpenAI()
llama_index.llms.openai.OpenAI
import os import sys import logging from dotenv import load_dotenv logging.basicConfig(stream=sys.stderr, level=logging.INFO) logger = logging.getLogger(__name__) load_dotenv() # take environment variables from .env. logger.debug(f"NewRelic application: {os.getenv('NEW_RELIC_APP_NAME')}") import os from time import time from nr_openai_observability import monitor from llama_index import VectorStoreIndex, download_loader if os.getenv("NEW_RELIC_APP_NAME") and os.getenv("NEW_RELIC_LICENSE_KEY"): monitor.initialization(application_name=os.getenv("NEW_RELIC_APP_NAME")) RayyanReader =
download_loader("RayyanReader")
llama_index.download_loader
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-bagel') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.bagel import BagelVectorStore from llama_index.core import StorageContext from IPython.display import Markdown, display import bagel from bagel import Settings import os import getpass os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import openai openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") server_settings = Settings( bagel_api_impl="rest", bagel_server_host="api.bageldb.ai" ) client = bagel.Client(server_settings) collection = client.get_or_create_cluster("testing_embeddings") embed_model = "local:BAAI/bge-small-en-v1.5" documents = SimpleDirectoryReader("./data/paul_graham/").load_data() vector_store = BagelVectorStore(collection=collection) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-llms-clarifai') get_ipython().system('pip install llama-index') get_ipython().system('pip install clarifai') import os os.environ["CLARIFAI_PAT"] = "<YOUR CLARIFAI PAT>" from llama_index.llms.clarifai import Clarifai params = dict( user_id="clarifai", app_id="ml", model_name="llama2-7b-alternative-4k", model_url=( "https://clarifai.com/clarifai/ml/models/llama2-7b-alternative-4k" ), ) llm_model =
Clarifai(model_url=params["model_url"])
llama_index.llms.clarifai.Clarifai
get_ipython().system('pip install llama-index llama-index-packs-raptor llama-index-vector-stores-qdrant') from llama_index.packs.raptor import RaptorPack get_ipython().system('wget https://arxiv.org/pdf/2401.18059.pdf -O ./raptor_paper.pdf') import os os.environ["OPENAI_API_KEY"] = "sk-..." import nest_asyncio nest_asyncio.apply() from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader(input_files=["./raptor_paper.pdf"]).load_data() from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.vector_stores.chroma import ChromaVectorStore import chromadb client = chromadb.PersistentClient(path="./raptor_paper_db") collection = client.get_or_create_collection("raptor") vector_store = ChromaVectorStore(chroma_collection=collection) raptor_pack = RaptorPack( documents, embed_model=OpenAIEmbedding( model="text-embedding-3-small" ), # used for embedding clusters llm=OpenAI(model="gpt-3.5-turbo", temperature=0.1), # used for generating summaries vector_store=vector_store, # used for storage similarity_top_k=2, # top k for each layer, or overall top-k for collapsed mode="collapsed", # sets default mode transformations=[ SentenceSplitter(chunk_size=400, chunk_overlap=50) ], # transformations applied for ingestion ) nodes = raptor_pack.run("What baselines is raptor compared against?", mode="collapsed") print(len(nodes)) print(nodes[0].text) nodes = raptor_pack.run( "What baselines is raptor compared against?", mode="tree_traversal" ) print(len(nodes)) print(nodes[0].text) from llama_index.packs.raptor import RaptorRetriever retriever = RaptorRetriever( [], embed_model=OpenAIEmbedding( model="text-embedding-3-small" ), # used for embedding clusters llm=
OpenAI(model="gpt-3.5-turbo", temperature=0.1)
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') import nest_asyncio nest_asyncio.apply() from llama_index.embeddings.huggingface import ( HuggingFaceEmbedding, HuggingFaceInferenceAPIEmbedding, ) from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings model_name = "jinaai/jina-embeddings-v2-small-en" embed_model = HuggingFaceEmbedding( model_name=model_name, trust_remote_code=True ) Settings.embed_model = embed_model Settings.chunk_size = 1024 embed_model_base = OpenAIEmbedding() from llama_index.core import VectorStoreIndex, SimpleDirectoryReader reader = SimpleDirectoryReader("../data/paul_graham") docs = reader.load_data() index_jina = VectorStoreIndex.from_documents(docs, embed_model=embed_model) index_base = VectorStoreIndex.from_documents( docs, embed_model=embed_model_base ) from llama_index.core.response.notebook_utils import display_source_node retriever_jina = index_jina.as_retriever(similarity_top_k=1) retriever_base = index_base.as_retriever(similarity_top_k=1) retrieved_nodes = retriever_jina.retrieve( "What did the author do in art school?" ) for n in retrieved_nodes:
display_source_node(n, source_length=2000)
llama_index.core.response.notebook_utils.display_source_node
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import os import getpass os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import openai openai.api_key = os.environ["OPENAI_API_KEY"] import chromadb chroma_client = chromadb.EphemeralClient() chroma_collection = chroma_client.create_collection("quickstart") from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.chroma import ChromaVectorStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "Michael Jordan is a retired professional basketball player," " widely regarded as one of the greatest basketball players of all" " time." ), metadata={ "category": "Sports", "country": "United States", }, ), TextNode( text=( "Angelina Jolie is an American actress, filmmaker, and" " humanitarian. She has received numerous awards for her acting" " and is known for her philanthropic work." ), metadata={ "category": "Entertainment", "country": "United States", }, ), TextNode( text=( "Elon Musk is a business magnate, industrial designer, and" " engineer. He is the founder, CEO, and lead designer of SpaceX," " Tesla, Inc., Neuralink, and The Boring Company." ), metadata={ "category": "Business", "country": "United States", }, ),
TextNode( text=( "Rihanna is a Barbadian singer, actress, and businesswoman. She" " has achieved significant success in the music industry and is" " known for her versatile musical style." )
llama_index.core.schema.TextNode
from llama_index.agent import OpenAIAgent import openai openai.api_key = "sk-api-key" from llama_index.tools.gmail.base import GmailToolSpec from llama_index.tools.google_calendar.base import GoogleCalendarToolSpec from llama_index.tools.google_search.base import GoogleSearchToolSpec gmail_tools = GmailToolSpec().to_tool_list() gcal_tools = GoogleCalendarToolSpec().to_tool_list() gsearch_tools =
GoogleSearchToolSpec(key="api-key", engine="engine")
llama_index.tools.google_search.base.GoogleSearchToolSpec
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().system('pip install "google-generativeai" -q') import nest_asyncio nest_asyncio.apply() from llama_index.core.llama_dataset import download_llama_dataset pairwise_evaluator_dataset, _ = download_llama_dataset( "MtBenchHumanJudgementDataset", "./mt_bench_data" ) pairwise_evaluator_dataset.to_pandas()[:5] from llama_index.core.evaluation import PairwiseComparisonEvaluator from llama_index.llms.openai import OpenAI from llama_index.llms.gemini import Gemini from llama_index.llms.cohere import Cohere llm_gpt4 = OpenAI(temperature=0, model="gpt-4") llm_gpt35 = OpenAI(temperature=0, model="gpt-3.5-turbo") llm_gemini = Gemini(model="models/gemini-pro", temperature=0) evaluators = { "gpt-4": PairwiseComparisonEvaluator(llm=llm_gpt4), "gpt-3.5":
PairwiseComparisonEvaluator(llm=llm_gpt35)
llama_index.core.evaluation.PairwiseComparisonEvaluator
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import os os.environ["OPENAI_API_KEY"] = "sk-..." import tiktoken from llama_index.core.callbacks import CallbackManager, TokenCountingHandler from llama_index.llms.openai import OpenAI from llama_index.core import Settings token_counter = TokenCountingHandler( tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode ) Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.2) Settings.callback_manager =
CallbackManager([token_counter])
llama_index.core.callbacks.CallbackManager
get_ipython().run_line_magic('pip', 'install llama-index-llms-monsterapi') get_ipython().system('python3 -m pip install llama-index --quiet -y') get_ipython().system('python3 -m pip install monsterapi --quiet') get_ipython().system('python3 -m pip install sentence_transformers --quiet') import os from llama_index.llms.monsterapi import MonsterLLM from llama_index.core.embeddings import resolve_embed_model from llama_index.core.node_parser import SentenceSplitter from llama_index.core import VectorStoreIndex, SimpleDirectoryReader os.environ["MONSTER_API_KEY"] = "" model = "llama2-7b-chat" llm = MonsterLLM(model=model, temperature=0.75) result = llm.complete("Who are you?") print(result) from llama_index.core.llms import ChatMessage history_message = ChatMessage( **{ "role": "user", "content": ( "When asked 'who are you?' respond as 'I am qblocks llm model'" " everytime." ), } ) current_message = ChatMessage(**{"role": "user", "content": "Who are you?"}) response = llm.chat([history_message, current_message]) print(response) get_ipython().system('python3 -m pip install pypdf --quiet') get_ipython().system('rm -r ./data') get_ipython().system('mkdir -p data&&cd data&&curl \'https://arxiv.org/pdf/2005.11401.pdf\' -o "RAG.pdf"') documents = SimpleDirectoryReader("./data").load_data() llm = MonsterLLM(model=model, temperature=0.75, context_window=1024) embed_model = resolve_embed_model("local:BAAI/bge-small-en-v1.5") splitter = SentenceSplitter(chunk_size=1024) index = VectorStoreIndex.from_documents( documents, transformations=[splitter], embed_model=embed_model ) query_engine = index.as_query_engine(llm=llm) response = llm.complete("What is Retrieval-Augmented Generation?") print(response) response = query_engine.query("What is Retrieval-Augmented Generation?") print(response) deploy_llm = MonsterLLM( model="deploy-llm", base_url="https://ecc7deb6-26e0-419b-a7f2-0deb934af29a.monsterapi.ai", monster_api_key="a0f8a6ba-c32f-4407-af0c-169f1915490c", temperature=0.75, ) deploy_llm.complete("What is Retrieval-Augmented Generation?") from llama_index.core.llms import ChatMessage history_message = ChatMessage( **{ "role": "user", "content": ( "When asked 'who are you?' respond as 'I am qblocks llm model'" " everytime." ), } ) current_message =
ChatMessage(**{"role": "user", "content": "Who are you?"})
llama_index.core.llms.ChatMessage
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import phoenix as px px.launch_app() import llama_index.core llama_index.core.set_global_handler("arize_phoenix") from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader reader = SimpleDirectoryReader("../data/paul_graham") docs = reader.load_data() import os from llama_index.core import ( StorageContext, VectorStoreIndex, load_index_from_storage, ) if not os.path.exists("storage"): index = VectorStoreIndex.from_documents(docs) index.set_index_id("vector_index") index.storage_context.persist("./storage") else: storage_context = StorageContext.from_defaults(persist_dir="storage") index = load_index_from_storage(storage_context, index_id="vector_index") from llama_index.core.query_pipeline import QueryPipeline from llama_index.core import PromptTemplate prompt_str = "Please generate related movies to {movie_name}" prompt_tmpl = PromptTemplate(prompt_str) llm = OpenAI(model="gpt-3.5-turbo") p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True) output = p.run(movie_name="The Departed") print(str(output)) from typing import List from pydantic import BaseModel, Field from llama_index.core.output_parsers import PydanticOutputParser class Movie(BaseModel): """Object representing a single movie.""" name: str = Field(..., description="Name of the movie.") year: int = Field(..., description="Year of the movie.") class Movies(BaseModel): """Object representing a list of movies.""" movies: List[Movie] = Field(..., description="List of movies.") llm = OpenAI(model="gpt-3.5-turbo") output_parser = PydanticOutputParser(Movies) json_prompt_str = """\ Please generate related movies to {movie_name}. Output with the following JSON format: """ json_prompt_str = output_parser.format(json_prompt_str) json_prompt_tmpl = PromptTemplate(json_prompt_str) p = QueryPipeline(chain=[json_prompt_tmpl, llm, output_parser], verbose=True) output = p.run(movie_name="Toy Story") output prompt_str = "Please generate related movies to {movie_name}" prompt_tmpl = PromptTemplate(prompt_str) prompt_str2 = """\ Here's some text: {text} Can you rewrite this with a summary of each movie? 
""" prompt_tmpl2 = PromptTemplate(prompt_str2) llm = OpenAI(model="gpt-3.5-turbo") llm_c = llm.as_query_component(streaming=True) p = QueryPipeline( chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True ) output = p.run(movie_name="The Dark Knight") for o in output: print(o.delta, end="") p = QueryPipeline( chain=[ json_prompt_tmpl, llm.as_query_component(streaming=True), output_parser, ], verbose=True, ) output = p.run(movie_name="Toy Story") print(output) from llama_index.postprocessor.cohere_rerank import CohereRerank prompt_str1 = "Please generate a concise question about Paul Graham's life regarding the following topic {topic}" prompt_tmpl1 = PromptTemplate(prompt_str1) prompt_str2 = ( "Please write a passage to answer the question\n" "Try to include as many key details as possible.\n" "\n" "\n" "{query_str}\n" "\n" "\n" 'Passage:"""\n' ) prompt_tmpl2 = PromptTemplate(prompt_str2) llm = OpenAI(model="gpt-3.5-turbo") retriever = index.as_retriever(similarity_top_k=5) p = QueryPipeline( chain=[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever], verbose=True ) nodes = p.run(topic="college") len(nodes) from llama_index.postprocessor.cohere_rerank import CohereRerank from llama_index.core.response_synthesizers import TreeSummarize prompt_str = "Please generate a question about Paul Graham's life regarding the following topic {topic}" prompt_tmpl = PromptTemplate(prompt_str) llm = OpenAI(model="gpt-3.5-turbo") retriever = index.as_retriever(similarity_top_k=3) reranker = CohereRerank() summarizer = TreeSummarize(llm=llm) p = QueryPipeline(verbose=True) p.add_modules( { "llm": llm, "prompt_tmpl": prompt_tmpl, "retriever": retriever, "summarizer": summarizer, "reranker": reranker, } ) p.add_link("prompt_tmpl", "llm") p.add_link("llm", "retriever") p.add_link("retriever", "reranker", dest_key="nodes") p.add_link("llm", "reranker", dest_key="query_str") p.add_link("reranker", "summarizer", dest_key="nodes") p.add_link("llm", "summarizer", dest_key="query_str") print(summarizer.as_query_component().input_keys) from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(p.dag) net.show("rag_dag.html") response = p.run(topic="YC") print(str(response)) response = await p.arun(topic="YC") print(str(response)) from llama_index.postprocessor.cohere_rerank import CohereRerank from llama_index.core.response_synthesizers import TreeSummarize from llama_index.core.query_pipeline import InputComponent retriever = index.as_retriever(similarity_top_k=5) summarizer = TreeSummarize(llm=OpenAI(model="gpt-3.5-turbo")) reranker = CohereRerank() p =
QueryPipeline(verbose=True)
llama_index.core.query_pipeline.QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-konko') get_ipython().system('pip install llama-index') import os os.environ["KONKO_API_KEY"] = "<your-api-key>" from llama_index.llms.konko import Konko from llama_index.core.llms import ChatMessage llm = Konko(model="meta-llama/llama-2-13b-chat") messages =
ChatMessage(role="user", content="Explain Big Bang Theory briefly")
llama_index.core.llms.ChatMessage
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core import VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4") node_parser =
SentenceSplitter(chunk_size=1024)
llama_index.core.node_parser.SentenceSplitter
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray') get_ipython().system('pip install llama-index') import os import sys import logging import textwrap import warnings warnings.filterwarnings("ignore") os.environ["TOKENIZERS_PARALLELISM"] = "false" from llama_index.core import ( GPTVectorStoreIndex, SimpleDirectoryReader, Document, ) from llama_index.vector_stores.docarray import DocArrayInMemoryVectorStore from IPython.display import Markdown, display import os os.environ["OPENAI_API_KEY"] = "<your openai key>" get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print( "Document ID:", documents[0].doc_id, "Document Hash:", documents[0].doc_hash, ) from llama_index.core import StorageContext vector_store = DocArrayInMemoryVectorStore() storage_context = StorageContext.from_defaults(vector_store=vector_store) index = GPTVectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") print(textwrap.fill(str(response), 100)) response = query_engine.query("What was a hard moment for the author?") print(textwrap.fill(str(response), 100)) from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", }, ), ] from llama_index.core import StorageContext vector_store = DocArrayInMemoryVectorStore() storage_context = StorageContext.from_defaults(vector_store=vector_store) index =
GPTVectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.GPTVectorStoreIndex
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core import VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4") node_parser = SentenceSplitter(chunk_size=1024) nodes = node_parser.get_nodes_from_documents(documents) index = VectorStoreIndex(nodes) query_engine = index.as_query_engine(llm=llm) from llama_index.core.schema import BaseNode from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage, MessageRole from llama_index.core import ChatPromptTemplate, PromptTemplate from typing import Tuple, List import re llm = OpenAI(model="gpt-4") QA_PROMPT = PromptTemplate( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the query.\n" "Query: {query_str}\n" "Answer: " ) def generate_answers_for_questions( questions: List[str], context: str, llm: OpenAI ) -> str: """Generate answers for questions given context.""" answers = [] for question in questions: fmt_qa_prompt = QA_PROMPT.format( context_str=context, query_str=question ) response_obj = llm.complete(fmt_qa_prompt) answers.append(str(response_obj)) return answers QUESTION_GEN_USER_TMPL = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "generate the relevant questions. " ) QUESTION_GEN_SYS_TMPL = """\ You are a Teacher/ Professor. Your task is to setup \ {num_questions_per_chunk} questions for an upcoming \ quiz/examination. The questions should be diverse in nature \ across the document. 
Restrict the questions to the \ context information provided.\ """ question_gen_template = ChatPromptTemplate( message_templates=[ ChatMessage(role=MessageRole.SYSTEM, content=QUESTION_GEN_SYS_TMPL), ChatMessage(role=MessageRole.USER, content=QUESTION_GEN_USER_TMPL), ] ) def generate_qa_pairs( nodes: List[BaseNode], llm: OpenAI, num_questions_per_chunk: int = 10 ) -> List[Tuple[str, str]]: """Generate questions.""" qa_pairs = [] for idx, node in enumerate(nodes): print(f"Node {idx}/{len(nodes)}") context_str = node.get_content(metadata_mode="all") fmt_messages = question_gen_template.format_messages( num_questions_per_chunk=num_questions_per_chunk, context_str=context_str, ) chat_response = llm.chat(fmt_messages) raw_output = chat_response.message.content result_list = str(raw_output).strip().split("\n") cleaned_questions = [ re.sub(r"^\d+[\).\s]", "", question).strip() for question in result_list ] answers = generate_answers_for_questions( cleaned_questions, context_str, llm ) cur_qa_pairs = list(zip(cleaned_questions, answers)) qa_pairs.extend(cur_qa_pairs) return qa_pairs qa_pairs = generate_qa_pairs( nodes, llm, num_questions_per_chunk=10, ) import pickle pickle.dump(qa_pairs, open("eval_dataset.pkl", "wb")) import pickle qa_pairs = pickle.load(open("eval_dataset.pkl", "rb")) from llama_index.core.llms import ChatMessage, MessageRole from llama_index.core import ChatPromptTemplate, PromptTemplate from typing import Dict CORRECTNESS_SYS_TMPL = """ You are an expert evaluation system for a question answering chatbot. You are given the following information: - a user query, - a reference answer, and - a generated answer. Your job is to judge the relevance and correctness of the generated answer. Output a single score that represents a holistic evaluation. You must return your response in a line with only the score. Do not return answers in any other format. On a separate line provide your reasoning for the score as well. Follow these guidelines for scoring: - Your score has to be between 1 and 5, where 1 is the worst and 5 is the best. - If the generated answer is not relevant to the user query, \ you should give a score of 1. - If the generated answer is relevant but contains mistakes, \ you should give a score between 2 and 3. - If the generated answer is relevant and fully correct, \ you should give a score between 4 and 5. """ CORRECTNESS_USER_TMPL = """ {query} {reference_answer} {generated_answer} """ eval_chat_template = ChatPromptTemplate( message_templates=[ ChatMessage(role=MessageRole.SYSTEM, content=CORRECTNESS_SYS_TMPL),
ChatMessage(role=MessageRole.USER, content=CORRECTNESS_USER_TMPL)
llama_index.core.llms.ChatMessage
from llama_index.llms.openai import OpenAI from llama_index.core import VectorStoreIndex from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core.postprocessor import LLMRerank from llama_index.core import VectorStoreIndex from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core import Settings from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.packs.koda_retriever import KodaRetriever import os from pinecone import Pinecone pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY")) index = pc.Index("sample-movies") Settings.llm = OpenAI() Settings.embed_model =
OpenAIEmbedding()
llama_index.embeddings.openai.OpenAIEmbedding
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"') from llama_index.core import download_loader from llama_index.readers.file import PyMuPDFReader llama2_docs = PyMuPDFReader().load_data( file_path="./llama2.pdf", metadata=True ) attention_docs = PyMuPDFReader().load_data( file_path="./attention.pdf", metadata=True ) import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.core.node_parser import TokenTextSplitter nodes = TokenTextSplitter( chunk_size=1024, chunk_overlap=128 ).get_nodes_from_documents(llama2_docs + attention_docs) from llama_index.core.storage.docstore import SimpleDocumentStore from llama_index.storage.docstore.redis import RedisDocumentStore from llama_index.storage.docstore.mongodb import MongoDocumentStore from llama_index.storage.docstore.firestore import FirestoreDocumentStore from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore docstore = SimpleDocumentStore() docstore.add_documents(nodes) from llama_index.core import VectorStoreIndex, StorageContext from llama_index.retrievers.bm25 import BM25Retriever from llama_index.vector_stores.qdrant import QdrantVectorStore from qdrant_client import QdrantClient client = QdrantClient(path="./qdrant_data") vector_store = QdrantVectorStore("composable", client=client) storage_context = StorageContext.from_defaults(vector_store=vector_store) index =
VectorStoreIndex(nodes=nodes)
llama_index.core.VectorStoreIndex
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-program-openai') from llama_index.llms.openai import OpenAI llm =
OpenAI(model="gpt-3.5-turbo-1106")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-txtai') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import txtai txtai_index = txtai.ann.ANNFactory.create({"backend": "numpy"}) from llama_index.core import ( SimpleDirectoryReader, load_index_from_storage, VectorStoreIndex, StorageContext, ) from llama_index.vector_stores.txtai import TxtaiVectorStore from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray') get_ipython().system('pip install llama-index') import os import sys import logging import textwrap import warnings warnings.filterwarnings("ignore") os.environ["TOKENIZERS_PARALLELISM"] = "false" from llama_index.core import ( GPTVectorStoreIndex, SimpleDirectoryReader, Document, ) from llama_index.vector_stores.docarray import DocArrayInMemoryVectorStore from IPython.display import Markdown, display import os os.environ["OPENAI_API_KEY"] = "<your openai key>" get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-epsilla') get_ipython().system('pip install pyepsilla') get_ipython().system('pip install llama-index') import logging import sys from llama_index.core import SimpleDirectoryReader, Document, StorageContext from llama_index.core import VectorStoreIndex from llama_index.vector_stores.epsilla import EpsillaVectorStore import textwrap import openai import getpass OPENAI_API_KEY = getpass.getpass("OpenAI API Key:") openai.api_key = OPENAI_API_KEY get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print(f"Total documents: {len(documents)}") print(f"First document, id: {documents[0].doc_id}") print(f"First document, hash: {documents[0].hash}") from pyepsilla import vectordb client = vectordb.Client() vector_store = EpsillaVectorStore(client=client, db_path="/tmp/llamastore") storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
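A minimal sketch continuing the Epsilla setup above: build the index with the Epsilla-backed storage context and run a query.
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))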
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import phoenix as px px.launch_app() import llama_index.core llama_index.core.set_global_handler("arize_phoenix") import nest_asyncio nest_asyncio.apply() from llama_index.packs.agents.llm_compiler.step import LLMCompilerAgentWorker from llama_index.core.llama_pack import download_llama_pack download_llama_pack( "LLMCompilerAgentPack", "./agent_pack", skip_load=True, ) from agent_pack.step import LLMCompilerAgentWorker import json from typing import Sequence, List from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool, FunctionTool import nest_asyncio nest_asyncio.apply() def multiply(a: int, b: int) -> int: """Multiply two integers and return the result integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and return the result integer""" return a + b add_tool = FunctionTool.from_defaults(fn=add) tools = [multiply_tool, add_tool] multiply_tool.metadata.fn_schema_str from llama_index.core.agent import AgentRunner llm = OpenAI(model="gpt-4") callback_manager = llm.callback_manager agent_worker = LLMCompilerAgentWorker.from_tools( tools, llm=llm, verbose=True, callback_manager=callback_manager ) agent = AgentRunner(agent_worker, callback_manager=callback_manager) response = agent.chat("What is (121 * 3) + 42?") response agent.memory.get_all() get_ipython().system('pip install llama-index-readers-wikipedia') from llama_index.readers.wikipedia import WikipediaReader wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Miami"] city_docs = {} reader =
WikipediaReader()
llama_index.readers.wikipedia.WikipediaReader
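A minimal sketch, assuming the reader above: load each city page into the city_docs mapping (auto_suggest=False avoids surprise redirects).
for wiki_title in wiki_titles:
    city_docs[wiki_title] = reader.load_data(
        pages=[wiki_title], auto_suggest=False
    )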
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().system('pip install llama-index') from llama_index.core.ingestion.cache import RedisCache from llama_index.core.ingestion import IngestionCache ingest_cache = IngestionCache( cache=RedisCache.from_host_and_port(host="127.0.0.1", port=6379), collection="my_test_cache", ) get_ipython().system('pip install weaviate-client') import weaviate auth_config = weaviate.AuthApiKey(api_key="...") client = weaviate.Client(url="https://...", auth_client_secret=auth_config) from llama_index.vector_stores.weaviate import WeaviateVectorStore vector_store = WeaviateVectorStore( weaviate_client=client, index_name="CachingTest" ) from llama_index.core.node_parser import TokenTextSplitter from llama_index.embeddings.huggingface import HuggingFaceEmbedding text_splitter = TokenTextSplitter(chunk_size=512) embed_model =
HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
llama_index.embeddings.huggingface.HuggingFaceEmbedding
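A minimal sketch wiring the pieces above into an IngestionPipeline so repeated runs hit the Redis-backed cache instead of re-embedding (assumes `documents` are loaded elsewhere).
from llama_index.core.ingestion import IngestionPipeline

pipeline = IngestionPipeline(
    transformations=[text_splitter, embed_model],
    cache=ingest_cache,
    vector_store=vector_store,
)
# nodes = pipeline.run(documents=documents)  # second run is served from the cache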
from llama_index.core import VectorStoreIndex from llama_index.core.objects import ObjectIndex, SimpleObjectNodeMapping obj1 = {"input": "Hey, how's it going"} obj2 = ["a", "b", "c", "d"] obj3 = "llamaindex is an awesome library!" arbitrary_objects = [obj1, obj2, obj3] obj_node_mapping = SimpleObjectNodeMapping.from_objects(arbitrary_objects) nodes = obj_node_mapping.to_nodes(arbitrary_objects) object_index = ObjectIndex( index=
VectorStoreIndex(nodes=nodes)
llama_index.core.VectorStoreIndex
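A minimal sketch, assuming the ObjectIndex above is completed with its object_node_mapping: retrieval hands back the original Python objects rather than nodes.
object_retriever = object_index.as_retriever(similarity_top_k=1)
print(object_retriever.retrieve("llamaindex"))  # -> ["llamaindex is an awesome library!"]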
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip') get_ipython().system('unzip data.zip') import pandas as pd from pathlib import Path data_dir = Path("./WikiTableQuestions/csv/200-csv") csv_files = sorted([f for f in data_dir.glob("*.csv")]) dfs = [] for csv_file in csv_files: print(f"processing file: {csv_file}") try: df = pd.read_csv(csv_file) dfs.append(df) except Exception as e: print(f"Error parsing {csv_file}: {str(e)}") tableinfo_dir = "WikiTableQuestions_TableInfo" get_ipython().system('mkdir {tableinfo_dir}') from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.llms.openai import OpenAI class TableInfo(BaseModel): """Information regarding a structured table.""" table_name: str = Field( ..., description="table name (must be underscores and NO spaces)" ) table_summary: str = Field( ..., description="short, concise summary/caption of the table" ) prompt_str = """\ Give me a summary of the table with the following JSON format. - The table name must be unique to the table and describe it while being concise. - Do NOT output a generic table name (e.g. table, my_table). Do NOT make the table name one of the following: {exclude_table_name_list} Table: {table_str} Summary: """ program = LLMTextCompletionProgram.from_defaults( output_cls=TableInfo, llm=OpenAI(model="gpt-3.5-turbo"), prompt_template_str=prompt_str, ) import json def _get_tableinfo_with_index(idx: int) -> str: results_gen = Path(tableinfo_dir).glob(f"{idx}_*") results_list = list(results_gen) if len(results_list) == 0: return None elif len(results_list) == 1: path = results_list[0] return TableInfo.parse_file(path) else: raise ValueError( f"More than one file matching index: {list(results_gen)}" ) table_names = set() table_infos = [] for idx, df in enumerate(dfs): table_info = _get_tableinfo_with_index(idx) if table_info: table_infos.append(table_info) else: while True: df_str = df.head(10).to_csv() table_info = program( table_str=df_str, exclude_table_name_list=str(list(table_names)), ) table_name = table_info.table_name print(f"Processed table: {table_name}") if table_name not in table_names: table_names.add(table_name) break else: print(f"Table name {table_name} already exists, trying again.") pass out_file = f"{tableinfo_dir}/{idx}_{table_name}.json" json.dump(table_info.dict(), open(out_file, "w")) table_infos.append(table_info) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, ) import re def sanitize_column_name(col_name): return re.sub(r"\W+", "_", col_name) def create_table_from_dataframe( df: pd.DataFrame, table_name: str, engine, metadata_obj ): sanitized_columns = {col: sanitize_column_name(col) for col in df.columns} df = df.rename(columns=sanitized_columns) columns = [ Column(col, String if dtype == "object" else Integer) for col, dtype in zip(df.columns, df.dtypes) ] table = Table(table_name, metadata_obj, *columns) metadata_obj.create_all(engine) with engine.connect() as conn: for _, row in df.iterrows(): insert_stmt = table.insert().values(**row.to_dict()) conn.execute(insert_stmt) conn.commit() engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() for idx, df in enumerate(dfs): tableinfo = _get_tableinfo_with_index(idx) print(f"Creating table: {tableinfo.table_name}") create_table_from_dataframe(df, 
tableinfo.table_name, engine, metadata_obj) import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index.core import SQLDatabase, VectorStoreIndex sql_database = SQLDatabase(engine) table_node_mapping = SQLTableNodeMapping(sql_database) table_schema_objs = [ SQLTableSchema(table_name=t.table_name, context_str=t.table_summary) for t in table_infos ] # add a SQLTableSchema for each table obj_index = ObjectIndex.from_objects( table_schema_objs, table_node_mapping, VectorStoreIndex, ) obj_retriever = obj_index.as_retriever(similarity_top_k=3) from llama_index.core.retrievers import SQLRetriever from typing import List from llama_index.core.query_pipeline import FnComponent sql_retriever = SQLRetriever(sql_database) def get_table_context_str(table_schema_objs: List[SQLTableSchema]): """Get table context string.""" context_strs = [] for table_schema_obj in table_schema_objs: table_info = sql_database.get_single_table_info( table_schema_obj.table_name ) if table_schema_obj.context_str: table_opt_context = " The table description is: " table_opt_context += table_schema_obj.context_str table_info += table_opt_context context_strs.append(table_info) return "\n\n".join(context_strs) table_parser_component =
FnComponent(fn=get_table_context_str)
llama_index.core.query_pipeline.FnComponent
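A minimal sketch, assuming the components above: chain table retrieval and schema formatting in a QueryPipeline (module names are illustrative).
from llama_index.core.query_pipeline import QueryPipeline, InputComponent

qp = QueryPipeline(
    modules={
        "input": InputComponent(),
        "table_retriever": obj_retriever,
        "table_output_parser": table_parser_component,
    },
    verbose=True,
)
qp.add_chain(["input", "table_retriever", "table_output_parser"])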
get_ipython().system('pip install llama_index') get_ipython().system('pip install llama_hub') get_ipython().system('pip install torch_geometric') import os from pprint import pprint from llama_index import ( ServiceContext, VectorStoreIndex, SummaryIndex, ) import llama_hub.docstring_walker as docstring_walker walker = docstring_walker.DocstringWalker() path_to_docstring_walker = os.path.dirname(docstring_walker.__file__) example1_docs = walker.load_data(path_to_docstring_walker) print(example1_docs[0].text) example1_index =
VectorStoreIndex(example1_docs)
llama_index.VectorStoreIndex
get_ipython().run_line_magic('pip', 'install llama-index-readers-notion') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) get_ipython().system('pip install llama-index') from llama_index.core import SummaryIndex from llama_index.readers.notion import NotionPageReader from IPython.display import Markdown, display import os integration_token = os.getenv("NOTION_INTEGRATION_TOKEN") page_ids = ["<page_id>"] documents =
NotionPageReader(integration_token=integration_token)
llama_index.readers.notion.NotionPageReader
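A minimal sketch completing the Notion row above: load the pages and build a summary index over them.
documents = NotionPageReader(integration_token=integration_token).load_data(
    page_ids=page_ids
)
index = SummaryIndex.from_documents(documents)
response = index.as_query_engine().query("Summarize the page.")
display(Markdown(f"<b>{response}</b>"))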
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate') get_ipython().system('pip install llama-index weaviate-client') import os import openai os.environ["OPENAI_API_KEY"] = "sk-<your key here>" openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import weaviate resource_owner_config = weaviate.AuthClientPassword( username="", password="", ) client = weaviate.Client( "https://test.weaviate.network", auth_client_secret=resource_owner_config, ) from llama_index.core import VectorStoreIndex from llama_index.vector_stores.weaviate import WeaviateVectorStore from IPython.display import Markdown, display from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", "year": 1994, }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", "year": 1972, }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", "theme": "Fiction", "year": 2010, }, ), TextNode( text="To Kill a Mockingbird", metadata={ "author": "Harper Lee", "theme": "Mafia", "year": 1960, }, ), TextNode( text="1984", metadata={ "author": "George Orwell", "theme": "Totalitarianism", "year": 1949, }, ), TextNode( text="The Great Gatsby", metadata={ "author": "F. Scott Fitzgerald", "theme": "The American Dream", "year": 1925, }, ), TextNode( text="Harry Potter and the Sorcerer's Stone", metadata={ "author": "J.K. Rowling", "theme": "Fiction", "year": 1997, }, ), ] from llama_index.core import StorageContext vector_store = WeaviateVectorStore( weaviate_client=client, index_name="LlamaIndex_filter" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) index =
VectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.VectorStoreIndex
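A minimal sketch, assuming the Weaviate-backed index above: metadata filters restrict retrieval to nodes whose metadata matches exactly.
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters

filters = MetadataFilters(filters=[ExactMatchFilter(key="theme", value="Mafia")])
retriever = index.as_retriever(filters=filters)
print(retriever.retrieve("What is a story about crime families?"))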
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google') get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google') get_ipython().run_line_magic('pip', 'install llama-index') get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"') get_ipython().run_line_magic('pip', 'install google-auth-oauthlib') from google.oauth2 import service_account from llama_index.vector_stores.google import set_google_config credentials = service_account.Credentials.from_service_account_file( "service_account_key.json", scopes=[ "https://www.googleapis.com/auth/generative-language.retriever", ], ) set_google_config(auth_credentials=credentials) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix from typing import Iterable from random import randrange LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab" SESSION_CORPUS_ID_PREFIX = ( f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}" ) def corpus_id(num_id: int) -> str: return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}" SESSION_CORPUS_ID = corpus_id(1) def list_corpora() -> Iterable[genaix.Corpus]: client = genaix.build_semantic_retriever() yield from genaix.list_corpora(client=client) def delete_corpus(*, corpus_id: str) -> None: client = genaix.build_semantic_retriever() genaix.delete_corpus(corpus_id=corpus_id, client=client) def cleanup_colab_corpora(): for corpus in list_corpora(): if corpus.corpus_id.startswith(LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX): try: delete_corpus(corpus_id=corpus.corpus_id) print(f"Deleted corpus {corpus.corpus_id}.") except Exception: pass cleanup_colab_corpora() from llama_index.core import SimpleDirectoryReader from llama_index.indices.managed.google import GoogleIndex from llama_index.core import Response import time index = GoogleIndex.create_corpus( corpus_id=SESSION_CORPUS_ID, display_name="My first corpus!" 
) print(f"Newly created corpus ID is {index.corpus_id}.") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index.insert_documents(documents) for corpus in list_corpora(): print(corpus) query_engine = index.as_query_engine() response = query_engine.query("What did Paul Graham do growing up?") assert isinstance(response, Response) print(f"Response is {response.response}") for cited_text in [node.text for node in response.source_nodes]: print(f"Cited text: {cited_text}") if response.metadata: print( f"Answerability: {response.metadata.get('answerable_probability', 0)}" ) index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID) query_engine = index.as_query_engine() response = query_engine.query("Which company did Paul Graham build?") assert isinstance(response, Response) print(f"Response is {response.response}") from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID) index.insert_nodes( [ TextNode( text="It was the best of times.", relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id="123", metadata={"file_name": "Tale of Two Cities"}, ) }, ), TextNode( text="It was the worst of times.", relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id="123", metadata={"file_name": "Tale of Two Cities"}, ) }, ), TextNode( text="Bugs Bunny: Wassup doc?", relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id="456", metadata={"file_name": "Bugs Bunny Adventure"}, ) }, ), ] ) from google.ai.generativelanguage import ( GenerateAnswerRequest, HarmCategory, SafetySetting, ) index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID) query_engine = index.as_query_engine( temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, safety_setting=[ SafetySetting( category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, ), SafetySetting( category=HarmCategory.HARM_CATEGORY_VIOLENCE, threshold=SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH, ), ], ) response = query_engine.query("What was Bugs Bunny's favorite saying?") print(response) from llama_index.core import Response response = query_engine.query("What were Paul Graham's achievements?") assert isinstance(response, Response) print(f"Response is {response.response}") for cited_text in [node.text for node in response.source_nodes]: print(f"Cited text: {cited_text}") if response.metadata: print( f"Answerability: {response.metadata.get('answerable_probability', 0)}" ) from llama_index.llms.gemini import Gemini GEMINI_API_KEY = "" # @param {type:"string"} gemini = Gemini(api_key=GEMINI_API_KEY) from llama_index.response_synthesizers.google import GoogleTextSynthesizer from llama_index.vector_stores.google import GoogleVectorStore from llama_index.core import VectorStoreIndex from llama_index.core.postprocessor import LLMRerank from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core.retrievers import VectorIndexRetriever store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID) index = VectorStoreIndex.from_vector_store( vector_store=store, ) response_synthesizer = GoogleTextSynthesizer.from_defaults( temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, ) reranker = LLMRerank( top_n=10, llm=gemini, ) query_engine = RetrieverQueryEngine.from_args( retriever=VectorIndexRetriever( index=index, similarity_top_k=20, ), node_postprocessors=[reranker], 
response_synthesizer=response_synthesizer, ) response = query_engine.query("What were Paul Graham's achievements?") print(response) from llama_index.core.indices.query.query_transform.base import ( StepDecomposeQueryTransform, ) from llama_index.core.query_engine import MultiStepQueryEngine store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID) index = VectorStoreIndex.from_vector_store( vector_store=store, ) response_synthesizer = GoogleTextSynthesizer.from_defaults( temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, ) single_step_query_engine = index.as_query_engine( similarity_top_k=10, response_synthesizer=response_synthesizer, ) step_decompose_transform = StepDecomposeQueryTransform( llm=gemini, verbose=True, ) query_engine = MultiStepQueryEngine( query_engine=single_step_query_engine, query_transform=step_decompose_transform, response_synthesizer=response_synthesizer, index_summary="Ask me anything.", num_steps=6, ) response = query_engine.query("What were Paul Graham's achievements?") print(response) from llama_index.core.indices.query.query_transform import HyDEQueryTransform from llama_index.core.query_engine import TransformQueryEngine store =
GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
llama_index.vector_stores.google.GoogleVectorStore.from_corpus
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import json from typing import Sequence, List from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool, FunctionTool import nest_asyncio nest_asyncio.apply() def multiply(a: int, b: int) -> int: """Multiply two integers and return the result integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and return the result integer""" return a + b add_tool = FunctionTool.from_defaults(fn=add) tools = [multiply_tool, add_tool] llm = OpenAI(model="gpt-3.5-turbo") from llama_index.core.agent import AgentRunner from llama_index.agent.openai import OpenAIAgentWorker, OpenAIAgent agent = OpenAIAgent.from_tools(tools, llm=llm, verbose=True) agent.chat("Hi") response = agent.chat("What is (121 * 3) + 42?") response task = agent.create_task("What is (121 * 3) + 42?") step_output = agent.run_step(task.task_id) step_output step_output = agent.run_step(task.task_id) step_output = agent.run_step(task.task_id) print(step_output.is_last) response = agent.finalize_response(task.task_id) print(str(response)) llm = OpenAI(model="gpt-4-1106-preview") from llama_index.core.agent import AgentRunner, ReActAgentWorker, ReActAgent agent =
ReActAgent.from_tools(tools, llm=llm, verbose=True)
llama_index.core.agent.ReActAgent.from_tools
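A minimal sketch, assuming the ReAct agent above: the same chat interface used earlier for the OpenAI agent applies here, with the reasoning trace printed under verbose=True.
response = agent.chat("What is (121 * 3) + 42?")
print(str(response))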
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-mongodb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) MONGO_URI = os.environ["MONGO_URI"] from llama_index.storage.docstore.mongodb import MongoDocumentStore from llama_index.storage.index_store.mongodb import MongoIndexStore storage_context = StorageContext.from_defaults( docstore=MongoDocumentStore.from_uri(uri=MONGO_URI), index_store=
MongoIndexStore.from_uri(uri=MONGO_URI)
llama_index.storage.index_store.mongodb.MongoIndexStore.from_uri
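A minimal sketch continuing the MongoDB-backed storage context above: add the parsed nodes once, then build several indices over the shared docstore.
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)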
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PyMuPDFReader from llama_index.core import Document from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode docs0 = PyMuPDFReader().load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") index = VectorStoreIndex(base_nodes) query_engine = index.as_query_engine(similarity_top_k=2) get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json') from llama_index.core.evaluation import QueryResponseDataset eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) from llama_index.core.evaluation.eval_utils import get_responses from llama_index.core.evaluation import CorrectnessEvaluator, BatchEvalRunner evaluator_c = CorrectnessEvaluator() evaluator_dict = {"correctness": evaluator_c} batch_runner = BatchEvalRunner(evaluator_dict, workers=2, show_progress=True) import numpy as np async def get_correctness(query_engine, eval_qa_pairs, batch_runner): eval_qs = [q for q, _ in eval_qa_pairs] eval_answers = [a for _, a in eval_qa_pairs] pred_responses =
get_responses(eval_qs, query_engine, show_progress=True)
llama_index.core.evaluation.eval_utils.get_responses
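A hedged sketch of how the helper above typically finishes: score the predicted responses against the reference answers with the batch runner and average the correctness scores.
eval_results = await batch_runner.aevaluate_responses(
    queries=eval_qs, responses=pred_responses, reference=eval_answers
)
avg_correctness = np.mean([r.score for r in eval_results["correctness"]])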
get_ipython().system('pip install llama-index') get_ipython().system('pip install duckdb') get_ipython().system('pip install llama-index-vector-stores-duckdb') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.duckdb import DuckDBVectorStore from llama_index.core import StorageContext from IPython.display import Markdown, display import os import openai openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("data/paul_graham/").load_data() vector_store = DuckDBVectorStore() storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") display(Markdown(f"<b>{response}</b>")) documents = SimpleDirectoryReader("data/paul_graham/").load_data() vector_store = DuckDBVectorStore("pg.duckdb", persist_dir="./persist/") storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) vector_store = DuckDBVectorStore.from_local("./persist/pg.duckdb") index = VectorStoreIndex.from_vector_store(vector_store) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") display(Markdown(f"<b>{response}</b>")) from llama_index.core.schema import TextNode nodes = [ TextNode( **{ "text": "The Shawshank Redemption", "metadata": { "author": "Stephen King", "theme": "Friendship", "year": 1994, "ref_doc_id": "doc_1", }, } ), TextNode( **{ "text": "The Godfather", "metadata": { "director": "Francis Ford Coppola", "theme": "Mafia", "year": 1972, "ref_doc_id": "doc_1", }, } ), TextNode( **{ "text": "Inception", "metadata": { "director": "Christopher Nolan", "theme": "Sci-fi", "year": 2010, "ref_doc_id": "doc_2", }, } ), ] vector_store = DuckDBVectorStore() storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters filters = MetadataFilters( filters=[
ExactMatchFilter(key="theme", value="Mafia")
llama_index.core.vector_stores.ExactMatchFilter
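A minimal sketch, assuming the filters above are wrapped in MetadataFilters: apply them via a retriever so only matching nodes are returned.
retriever = index.as_retriever(filters=filters)
print(retriever.retrieve("What movies are about organized crime?"))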
import openai openai.api_key = "sk-key" from llama_index.agent import OpenAIAgent from llama_index.tools.code_interpreter.base import CodeInterpreterToolSpec code_spec =
CodeInterpreterToolSpec()
llama_index.tools.code_interpreter.base.CodeInterpreterToolSpec
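A minimal sketch continuing the row above: turn the tool spec into a tool list and hand it to an agent.
tools = code_spec.to_tool_list()
agent = OpenAIAgent.from_tools(tools, verbose=True)
print(agent.chat("Write and run Python to compute the 10th Fibonacci number."))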
get_ipython().system('pip install llama-index') get_ipython().system('pip install duckdb') get_ipython().system('pip install llama-index-vector-stores-duckdb') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.duckdb import DuckDBVectorStore from llama_index.core import StorageContext from IPython.display import Markdown, display import os import openai openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("data/paul_graham/").load_data() vector_store = DuckDBVectorStore() storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") display(Markdown(f"<b>{response}</b>")) documents = SimpleDirectoryReader("data/paul_graham/").load_data() vector_store = DuckDBVectorStore("pg.duckdb", persist_dir="./persist/") storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-program-evaporate') get_ipython().system('pip install llama-index') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Houston"] from pathlib import Path import requests for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) from llama_index.core import SimpleDirectoryReader city_docs = {} for wiki_title in wiki_titles: city_docs[wiki_title] = SimpleDirectoryReader( input_files=[f"data/{wiki_title}.txt"] ).load_data() from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo") Settings.chunk_size = 512 city_nodes = {} for wiki_title in wiki_titles: docs = city_docs[wiki_title] nodes = Settings.node_parser.get_nodes_from_documents(docs) city_nodes[wiki_title] = nodes from llama_index.program.evaporate import DFEvaporateProgram program = DFEvaporateProgram.from_defaults( fields_to_extract=["population"], ) program.fit_fields(city_nodes["Toronto"][:1]) print(program.get_function_str("population")) seattle_df = program(nodes=city_nodes["Seattle"][:1]) seattle_df Settings.llm = OpenAI(temperature=0, model="gpt-4") Settings.chunk_size = 1024 Settings.chunk_overlap = 0 from llama_index.core.data_structs import Node train_text = """ <table class="wikitable sortable" style="margin-top:0; text-align:center; font-size:90%;"> <tbody><tr> <th>Team (IOC code) </th> <th>No. Summer </th> <th>No. Winter </th> <th>No. 
Games </th></tr> <tr> <td align="left"><span id="ALB"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/3/36/Flag_of_Albania.svg/22px-Flag_of_Albania.svg.png" decoding="async" width="22" height="16" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/3/36/Flag_of_Albania.svg/33px-Flag_of_Albania.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/3/36/Flag_of_Albania.svg/44px-Flag_of_Albania.svg.png 2x" data-file-width="980" data-file-height="700" />&#160;<a href="/wiki/Albania_at_the_Olympics" title="Albania at the Olympics">Albania</a>&#160;<span style="font-size:90%;">(ALB)</span></span> </td> <td style="background:#f2f2ce;">9</td> <td style="background:#cedff2;">5</td> <td>14 </td></tr> <tr> <td align="left"><span id="ASA"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/8/87/Flag_of_American_Samoa.svg/22px-Flag_of_American_Samoa.svg.png" decoding="async" width="22" height="11" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/8/87/Flag_of_American_Samoa.svg/33px-Flag_of_American_Samoa.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/8/87/Flag_of_American_Samoa.svg/44px-Flag_of_American_Samoa.svg.png 2x" data-file-width="1000" data-file-height="500" />&#160;<a href="/wiki/American_Samoa_at_the_Olympics" title="American Samoa at the Olympics">American Samoa</a>&#160;<span style="font-size:90%;">(ASA)</span></span> </td> <td style="background:#f2f2ce;">9</td> <td style="background:#cedff2;">2</td> <td>11 </td></tr> <tr> <td align="left"><span id="AND"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/1/19/Flag_of_Andorra.svg/22px-Flag_of_Andorra.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/1/19/Flag_of_Andorra.svg/33px-Flag_of_Andorra.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/1/19/Flag_of_Andorra.svg/44px-Flag_of_Andorra.svg.png 2x" data-file-width="1000" data-file-height="700" />&#160;<a href="/wiki/Andorra_at_the_Olympics" title="Andorra at the Olympics">Andorra</a>&#160;<span style="font-size:90%;">(AND)</span></span> </td> <td style="background:#f2f2ce;">12</td> <td style="background:#cedff2;">13</td> <td>25 </td></tr> <tr> <td align="left"><span id="ANG"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Flag_of_Angola.svg/22px-Flag_of_Angola.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Flag_of_Angola.svg/33px-Flag_of_Angola.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Flag_of_Angola.svg/44px-Flag_of_Angola.svg.png 2x" data-file-width="900" data-file-height="600" />&#160;<a href="/wiki/Angola_at_the_Olympics" title="Angola at the Olympics">Angola</a>&#160;<span style="font-size:90%;">(ANG)</span></span> </td> <td style="background:#f2f2ce;">10</td> <td style="background:#cedff2;">0</td> <td>10 </td></tr> <tr> <td align="left"><span id="ANT"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/8/89/Flag_of_Antigua_and_Barbuda.svg/22px-Flag_of_Antigua_and_Barbuda.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/8/89/Flag_of_Antigua_and_Barbuda.svg/33px-Flag_of_Antigua_and_Barbuda.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/8/89/Flag_of_Antigua_and_Barbuda.svg/44px-Flag_of_Antigua_and_Barbuda.svg.png 2x" data-file-width="900" data-file-height="600" 
/>&#160;<a href="/wiki/Antigua_and_Barbuda_at_the_Olympics" title="Antigua and Barbuda at the Olympics">Antigua and Barbuda</a>&#160;<span style="font-size:90%;">(ANT)</span></span> </td> <td style="background:#f2f2ce;">11</td> <td style="background:#cedff2;">0</td> <td>11 </td></tr> <tr> <td align="left"><span id="ARU"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/f/f6/Flag_of_Aruba.svg/22px-Flag_of_Aruba.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/f/f6/Flag_of_Aruba.svg/33px-Flag_of_Aruba.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/f/f6/Flag_of_Aruba.svg/44px-Flag_of_Aruba.svg.png 2x" data-file-width="900" data-file-height="600" />&#160;<a href="/wiki/Aruba_at_the_Olympics" title="Aruba at the Olympics">Aruba</a>&#160;<span style="font-size:90%;">(ARU)</span></span> </td> <td style="background:#f2f2ce;">9</td> <td style="background:#cedff2;">0</td> <td>9 </td></tr> """ train_nodes = [Node(text=train_text)] infer_text = """ <td align="left"><span id="BAN"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/f/f9/Flag_of_Bangladesh.svg/22px-Flag_of_Bangladesh.svg.png" decoding="async" width="22" height="13" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/f/f9/Flag_of_Bangladesh.svg/33px-Flag_of_Bangladesh.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/f/f9/Flag_of_Bangladesh.svg/44px-Flag_of_Bangladesh.svg.png 2x" data-file-width="1000" data-file-height="600" />&#160;<a href="/wiki/Bangladesh_at_the_Olympics" title="Bangladesh at the Olympics">Bangladesh</a>&#160;<span style="font-size:90%;">(BAN)</span></span> </td> <td style="background:#f2f2ce;">10</td> <td style="background:#cedff2;">0</td> <td>10 </td></tr> <tr> <td align="left"><span id="BIZ"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/e/e7/Flag_of_Belize.svg/22px-Flag_of_Belize.svg.png" decoding="async" width="22" height="13" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/e/e7/Flag_of_Belize.svg/33px-Flag_of_Belize.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/e/e7/Flag_of_Belize.svg/44px-Flag_of_Belize.svg.png 2x" data-file-width="1000" data-file-height="600" />&#160;<a href="/wiki/Belize_at_the_Olympics" title="Belize at the Olympics">Belize</a>&#160;<span style="font-size:90%;">(BIZ)</span></span> <sup class="reference" id="ref_BIZBIZ"><a href="#endnote_BIZBIZ">[BIZ]</a></sup> </td> <td style="background:#f2f2ce;">13</td> <td style="background:#cedff2;">0</td> <td>13 </td></tr> <tr> <td align="left"><span id="BEN"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0a/Flag_of_Benin.svg/22px-Flag_of_Benin.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/0/0a/Flag_of_Benin.svg/33px-Flag_of_Benin.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/0a/Flag_of_Benin.svg/44px-Flag_of_Benin.svg.png 2x" data-file-width="900" data-file-height="600" />&#160;<a href="/wiki/Benin_at_the_Olympics" title="Benin at the Olympics">Benin</a>&#160;<span style="font-size:90%;">(BEN)</span></span> <sup class="reference" id="ref_BENBEN"><a href="#endnote_BENBEN">[BEN]</a></sup> </td> <td style="background:#f2f2ce;">12</td> <td style="background:#cedff2;">0</td> <td>12 </td></tr> <tr> <td align="left"><span id="BHU"><img alt="" 
src="//upload.wikimedia.org/wikipedia/commons/thumb/9/91/Flag_of_Bhutan.svg/22px-Flag_of_Bhutan.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/91/Flag_of_Bhutan.svg/33px-Flag_of_Bhutan.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/91/Flag_of_Bhutan.svg/44px-Flag_of_Bhutan.svg.png 2x" data-file-width="900" data-file-height="600" />&#160;<a href="/wiki/Bhutan_at_the_Olympics" title="Bhutan at the Olympics">Bhutan</a>&#160;<span style="font-size:90%;">(BHU)</span></span> </td> <td style="background:#f2f2ce;">10</td> <td style="background:#cedff2;">0</td> <td>10 </td></tr> <tr> <td align="left"><span id="BOL"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/48/Flag_of_Bolivia.svg/22px-Flag_of_Bolivia.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/48/Flag_of_Bolivia.svg/33px-Flag_of_Bolivia.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/48/Flag_of_Bolivia.svg/44px-Flag_of_Bolivia.svg.png 2x" data-file-width="1100" data-file-height="750" />&#160;<a href="/wiki/Bolivia_at_the_Olympics" title="Bolivia at the Olympics">Bolivia</a>&#160;<span style="font-size:90%;">(BOL)</span></span> </td> <td style="background:#f2f2ce;">15</td> <td style="background:#cedff2;">7</td> <td>22 </td></tr> <tr> <td align="left"><span id="BIH"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/b/bf/Flag_of_Bosnia_and_Herzegovina.svg/22px-Flag_of_Bosnia_and_Herzegovina.svg.png" decoding="async" width="22" height="11" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/b/bf/Flag_of_Bosnia_and_Herzegovina.svg/33px-Flag_of_Bosnia_and_Herzegovina.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/b/bf/Flag_of_Bosnia_and_Herzegovina.svg/44px-Flag_of_Bosnia_and_Herzegovina.svg.png 2x" data-file-width="800" data-file-height="400" />&#160;<a href="/wiki/Bosnia_and_Herzegovina_at_the_Olympics" title="Bosnia and Herzegovina at the Olympics">Bosnia and Herzegovina</a>&#160;<span style="font-size:90%;">(BIH)</span></span> </td> <td style="background:#f2f2ce;">8</td> <td style="background:#cedff2;">8</td> <td>16 </td></tr> <tr> <td align="left"><span id="IVB"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/42/Flag_of_the_British_Virgin_Islands.svg/22px-Flag_of_the_British_Virgin_Islands.svg.png" decoding="async" width="22" height="11" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/42/Flag_of_the_British_Virgin_Islands.svg/33px-Flag_of_the_British_Virgin_Islands.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/42/Flag_of_the_British_Virgin_Islands.svg/44px-Flag_of_the_British_Virgin_Islands.svg.png 2x" data-file-width="1200" data-file-height="600" />&#160;<a href="/wiki/British_Virgin_Islands_at_the_Olympics" title="British Virgin Islands at the Olympics">British Virgin Islands</a>&#160;<span style="font-size:90%;">(IVB)</span></span> </td> <td style="background:#f2f2ce;">10</td> <td style="background:#cedff2;">2</td> <td>12 </td></tr> <tr> <td align="left"><span id="BRU"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/9c/Flag_of_Brunei.svg/22px-Flag_of_Brunei.svg.png" decoding="async" width="22" height="11" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/9c/Flag_of_Brunei.svg/33px-Flag_of_Brunei.svg.png 1.5x, 
//upload.wikimedia.org/wikipedia/commons/thumb/9/9c/Flag_of_Brunei.svg/44px-Flag_of_Brunei.svg.png 2x" data-file-width="1440" data-file-height="720" />&#160;<a href="/wiki/Brunei_at_the_Olympics" title="Brunei at the Olympics">Brunei</a>&#160;<span style="font-size:90%;">(BRU)</span></span> <sup class="reference" id="ref_AA"><a href="#endnote_AA">[A]</a></sup> </td> <td style="background:#f2f2ce;">6</td> <td style="background:#cedff2;">0</td> <td>6 </td></tr> <tr> <td align="left"><span id="CAM"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/8/83/Flag_of_Cambodia.svg/22px-Flag_of_Cambodia.svg.png" decoding="async" width="22" height="14" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/8/83/Flag_of_Cambodia.svg/33px-Flag_of_Cambodia.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/8/83/Flag_of_Cambodia.svg/44px-Flag_of_Cambodia.svg.png 2x" data-file-width="1000" data-file-height="640" />&#160;<a href="/wiki/Cambodia_at_the_Olympics" title="Cambodia at the Olympics">Cambodia</a>&#160;<span style="font-size:90%;">(CAM)</span></span> </td> <td style="background:#f2f2ce;">10</td> <td style="background:#cedff2;">0</td> <td>10 </td></tr> <tr> <td align="left"><span id="CPV"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/3/38/Flag_of_Cape_Verde.svg/22px-Flag_of_Cape_Verde.svg.png" decoding="async" width="22" height="13" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/3/38/Flag_of_Cape_Verde.svg/33px-Flag_of_Cape_Verde.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/3/38/Flag_of_Cape_Verde.svg/44px-Flag_of_Cape_Verde.svg.png 2x" data-file-width="1020" data-file-height="600" />&#160;<a href="/wiki/Cape_Verde_at_the_Olympics" title="Cape Verde at the Olympics">Cape Verde</a>&#160;<span style="font-size:90%;">(CPV)</span></span> </td> <td style="background:#f2f2ce;">7</td> <td style="background:#cedff2;">0</td> <td>7 </td></tr> <tr> <td align="left"><span id="CAY"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/0/0f/Flag_of_the_Cayman_Islands.svg/22px-Flag_of_the_Cayman_Islands.svg.png" decoding="async" width="22" height="11" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/0/0f/Flag_of_the_Cayman_Islands.svg/33px-Flag_of_the_Cayman_Islands.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/0/0f/Flag_of_the_Cayman_Islands.svg/44px-Flag_of_the_Cayman_Islands.svg.png 2x" data-file-width="1200" data-file-height="600" />&#160;<a href="/wiki/Cayman_Islands_at_the_Olympics" title="Cayman Islands at the Olympics">Cayman Islands</a>&#160;<span style="font-size:90%;">(CAY)</span></span> </td> <td style="background:#f2f2ce;">11</td> <td style="background:#cedff2;">2</td> <td>13 </td></tr> <tr> <td align="left"><span id="CAF"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Central_African_Republic.svg/22px-Flag_of_the_Central_African_Republic.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Central_African_Republic.svg/33px-Flag_of_the_Central_African_Republic.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Central_African_Republic.svg/44px-Flag_of_the_Central_African_Republic.svg.png 2x" data-file-width="900" data-file-height="600" />&#160;<a href="/wiki/Central_African_Republic_at_the_Olympics" title="Central African Republic at the Olympics">Central African Republic</a>&#160;<span 
style="font-size:90%;">(CAF)</span></span> </td> <td style="background:#f2f2ce;">11</td> <td style="background:#cedff2;">0</td> <td>11 </td></tr> <tr> <td align="left"><span id="CHA"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/4/4b/Flag_of_Chad.svg/22px-Flag_of_Chad.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/4/4b/Flag_of_Chad.svg/33px-Flag_of_Chad.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/4/4b/Flag_of_Chad.svg/44px-Flag_of_Chad.svg.png 2x" data-file-width="900" data-file-height="600" />&#160;<a href="/wiki/Chad_at_the_Olympics" title="Chad at the Olympics">Chad</a>&#160;<span style="font-size:90%;">(CHA)</span></span> </td> <td style="background:#f2f2ce;">13</td> <td style="background:#cedff2;">0</td> <td>13 </td></tr> <tr> <td align="left"><span id="COM"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/94/Flag_of_the_Comoros.svg/22px-Flag_of_the_Comoros.svg.png" decoding="async" width="22" height="13" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/94/Flag_of_the_Comoros.svg/33px-Flag_of_the_Comoros.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/94/Flag_of_the_Comoros.svg/44px-Flag_of_the_Comoros.svg.png 2x" data-file-width="1000" data-file-height="600" />&#160;<a href="/wiki/Comoros_at_the_Olympics" title="Comoros at the Olympics">Comoros</a>&#160;<span style="font-size:90%;">(COM)</span></span> </td> <td style="background:#f2f2ce;">7</td> <td style="background:#cedff2;">0</td> <td>7 </td></tr> <tr> <td align="left"><span id="CGO"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/9/92/Flag_of_the_Republic_of_the_Congo.svg/22px-Flag_of_the_Republic_of_the_Congo.svg.png" decoding="async" width="22" height="15" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/9/92/Flag_of_the_Republic_of_the_Congo.svg/33px-Flag_of_the_Republic_of_the_Congo.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/9/92/Flag_of_the_Republic_of_the_Congo.svg/44px-Flag_of_the_Republic_of_the_Congo.svg.png 2x" data-file-width="900" data-file-height="600" />&#160;<a href="/wiki/Republic_of_the_Congo_at_the_Olympics" title="Republic of the Congo at the Olympics">Republic of the Congo</a>&#160;<span style="font-size:90%;">(CGO)</span></span> </td> <td style="background:#f2f2ce;">13</td> <td style="background:#cedff2;">0</td> <td>13 </td></tr> <tr> <td align="left"><span id="COD"><img alt="" src="//upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Democratic_Republic_of_the_Congo.svg/22px-Flag_of_the_Democratic_Republic_of_the_Congo.svg.png" decoding="async" width="22" height="17" class="thumbborder" srcset="//upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Democratic_Republic_of_the_Congo.svg/33px-Flag_of_the_Democratic_Republic_of_the_Congo.svg.png 1.5x, //upload.wikimedia.org/wikipedia/commons/thumb/6/6f/Flag_of_the_Democratic_Republic_of_the_Congo.svg/44px-Flag_of_the_Democratic_Republic_of_the_Congo.svg.png 2x" data-file-width="800" data-file-height="600" />&#160;<a href="/wiki/Democratic_Republic_of_the_Congo_at_the_Olympics" title="Democratic Republic of the Congo at the Olympics">Democratic Republic of the Congo</a>&#160;<span style="font-size:90%;">(COD)</span></span> <sup class="reference" id="ref_CODCOD"><a href="#endnote_CODCOD">[COD]</a></sup> </td> <td style="background:#f2f2ce;">11</td> <td style="background:#cedff2;">0</td> <td>11 </td></tr> """ 
infer_nodes = [
Node(text=infer_text)
llama_index.core.data_structs.Node
get_ipython().run_line_magic('pip', 'install llama-index llama-index-vector-stores-qdrant -q') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data') get_ipython().system('wget "https://arxiv.org/pdf/2402.09353.pdf" -O "./data/dorav1.pdf"') from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4") response = llm.complete("What is DoRA?") print(response.text) """Load the data. With llama-index, before any transformations are applied, data is loaded in the `Document` abstraction, which is a container that holds the text of the document. """ from llama_index.core import SimpleDirectoryReader loader = SimpleDirectoryReader(input_dir="./data") documents = loader.load_data() """Chunk, Encode, and Store into a Vector Store. To streamline the process, we can make use of the IngestionPipeline class that will apply your specified transformations to the Document's. """ from llama_index.core.ingestion import IngestionPipeline from llama_index.core.node_parser import SentenceSplitter from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.vector_stores.qdrant import QdrantVectorStore import qdrant_client client = qdrant_client.QdrantClient(location=":memory:") vector_store = QdrantVectorStore(client=client, collection_name="test_store") pipeline = IngestionPipeline( transformations=[ SentenceSplitter(),
OpenAIEmbedding()
llama_index.embeddings.openai.OpenAIEmbedding
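A minimal sketch of how this pipeline is typically finished and used: attach the Qdrant store, run ingestion, then query an index built from the populated store.
pipeline = IngestionPipeline(
    transformations=[SentenceSplitter(), OpenAIEmbedding()],
    vector_store=vector_store,
)
pipeline.run(documents=documents)

from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_vector_store(
    vector_store, embed_model=OpenAIEmbedding()
)
print(index.as_query_engine(llm=llm).query("What is DoRA?"))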
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import logging import sys import pandas as pd logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core.evaluation import DatasetGenerator, RelevancyEvaluator from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, Response from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() data_generator = DatasetGenerator.from_documents(documents) eval_questions = data_generator.generate_questions_from_nodes() eval_questions gpt4 = OpenAI(temperature=0, model="gpt-4") evaluator_gpt4 =
RelevancyEvaluator(llm=gpt4)
llama_index.core.evaluation.RelevancyEvaluator
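A minimal sketch, assuming the evaluator above: build an index over the documents, answer one generated question, and score whether the response is relevant to the retrieved context.
vector_index = VectorStoreIndex.from_documents(documents)
query_engine = vector_index.as_query_engine()
response = query_engine.query(eval_questions[0])
eval_result = evaluator_gpt4.evaluate_response(
    query=eval_questions[0], response=response
)
print(eval_result.passing)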
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-program-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core import PromptTemplate choices = [ "Useful for questions related to apples", "Useful for questions related to oranges", ] def get_choice_str(choices): choices_str = "\n\n".join( [f"{idx+1}. {c}" for idx, c in enumerate(choices)] ) return choices_str choices_str = get_choice_str(choices) router_prompt0 = PromptTemplate( "Some choices are given below. It is provided in a numbered list (1 to" " {num_choices}), where each item in the list corresponds to a" " summary.\n---------------------\n{context_list}\n---------------------\nUsing" " only the choices above and not prior knowledge, return the top choices" " (no more than {max_outputs}, but only select what is needed) that are" " most relevant to the question: '{query_str}'\n" ) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo") def get_formatted_prompt(query_str): fmt_prompt = router_prompt0.format( num_choices=len(choices), max_outputs=2, context_list=choices_str, query_str=query_str, ) return fmt_prompt query_str = "Can you tell me more about the amount of Vitamin C in apples" fmt_prompt = get_formatted_prompt(query_str) response = llm.complete(fmt_prompt) print(str(response)) query_str = "What are the health benefits of eating orange peels?" fmt_prompt = get_formatted_prompt(query_str) response = llm.complete(fmt_prompt) print(str(response)) query_str = ( "Can you tell me more about the amount of Vitamin C in apples and oranges." ) fmt_prompt = get_formatted_prompt(query_str) response = llm.complete(fmt_prompt) print(str(response)) from dataclasses import fields from pydantic import BaseModel import json class Answer(BaseModel): choice: int reason: str print(json.dumps(Answer.schema(), indent=2)) from llama_index.core.types import BaseOutputParser FORMAT_STR = """The output should be formatted as a JSON instance that conforms to the JSON schema below. 
Here is the output schema: { "type": "array", "items": { "type": "object", "properties": { "choice": { "type": "integer" }, "reason": { "type": "string" } }, "required": [ "choice", "reason" ], "additionalProperties": false } } """ def _escape_curly_braces(input_string: str) -> str: escaped_string = input_string.replace("{", "{{").replace("}", "}}") return escaped_string def _marshal_output_to_json(output: str) -> str: output = output.strip() left = output.find("[") right = output.find("]") output = output[left : right + 1] return output from typing import List class RouterOutputParser(BaseOutputParser): def parse(self, output: str) -> List[Answer]: """Parse string.""" json_output = _marshal_output_to_json(output) json_dicts = json.loads(json_output) answers = [Answer(**json_dict) for json_dict in json_dicts] return answers def format(self, prompt_template: str) -> str: return prompt_template + "\n\n" + _escape_curly_braces(FORMAT_STR) output_parser = RouterOutputParser() from typing import List def route_query( query_str: str, choices: List[str], output_parser: RouterOutputParser ): choices_str = get_choice_str(choices) fmt_base_prompt = router_prompt0.format( num_choices=len(choices), max_outputs=len(choices), context_list=choices_str, query_str=query_str, ) fmt_json_prompt = output_parser.format(fmt_base_prompt) raw_output = llm.complete(fmt_json_prompt) parsed = output_parser.parse(str(raw_output)) return parsed from pydantic import Field class Answer(BaseModel): "Represents a single choice with a reason." choice: int reason: str class Answers(BaseModel): """Represents a list of answers.""" answers: List[Answer] Answers.schema() from llama_index.program.openai import OpenAIPydanticProgram router_prompt1 = router_prompt0.partial_format( num_choices=len(choices), max_outputs=len(choices), ) program = OpenAIPydanticProgram.from_defaults( output_cls=Answers, prompt=router_prompt1, verbose=True, ) query_str = "What are the health benefits of eating orange peels?"
output = program(context_list=choices_str, query_str=query_str) output get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core import VectorStoreIndex from llama_index.core import SummaryIndex from llama_index.core.node_parser import SentenceSplitter splitter = SentenceSplitter(chunk_size=1024) vector_index = VectorStoreIndex.from_documents( documents, transformations=[splitter] ) summary_index = SummaryIndex.from_documents( documents, transformations=[splitter] ) vector_query_engine = vector_index.as_query_engine(llm=llm) summary_query_engine = summary_index.as_query_engine(llm=llm) from llama_index.core.query_engine import CustomQueryEngine, BaseQueryEngine from llama_index.core.response_synthesizers import TreeSummarize class RouterQueryEngine(CustomQueryEngine): """Use our Pydantic program to perform routing.""" query_engines: List[BaseQueryEngine] choice_descriptions: List[str] verbose: bool = False router_prompt: PromptTemplate llm: OpenAI summarizer: TreeSummarize = Field(default_factory=TreeSummarize) def custom_query(self, query_str: str): """Define custom query.""" program = OpenAIPydanticProgram.from_defaults( output_cls=Answers, prompt=router_prompt1, verbose=self.verbose, llm=self.llm, ) choices_str = get_choice_str(self.choice_descriptions) output = program(context_list=choices_str, query_str=query_str) if self.verbose: print(f"Selected choice(s):") for answer in output.answers: print(f"Choice: {answer.choice}, Reason: {answer.reason}") responses = [] for answer in output.answers: choice_idx = answer.choice - 1 query_engine = self.query_engines[choice_idx] response = query_engine.query(query_str) responses.append(response) if len(responses) == 1: return responses[0] else: response_strs = [str(r) for r in responses] result_response = self.summarizer.get_response( query_str, response_strs ) return result_response choices = [ ( "Useful for answering questions about specific sections of the Llama 2" " paper" ), "Useful for questions that ask for a summary of the whole paper", ] router_query_engine = RouterQueryEngine( query_engines=[vector_query_engine, summary_query_engine], choice_descriptions=choices, verbose=True, router_prompt=router_prompt1, llm=
OpenAI(model="gpt-4")
llama_index.llms.openai.OpenAI
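A minimal sketch of how this completion slots back into the record's router: the GPT-4 LLM drives the Pydantic routing program, and the resulting engine can be queried directly. Names such as RouterQueryEngine, vector_query_engine, choices, and router_prompt1 are assumed in scope from the prompt above; the question string is illustrative only.

from llama_index.llms.openai import OpenAI

# Sketch: wire the completion into the custom RouterQueryEngine from the record.
router_query_engine = RouterQueryEngine(
    query_engines=[vector_query_engine, summary_query_engine],
    choice_descriptions=choices,
    verbose=True,
    router_prompt=router_prompt1,
    llm=OpenAI(model="gpt-4"),
)
response = router_query_engine.query("Can you give a summary of this paper?")
print(str(response))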
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai-legacy') get_ipython().system('pip install llama-index') import json from typing import Sequence from llama_index.core import ( SimpleDirectoryReader, VectorStoreIndex, StorageContext, load_index_from_storage, ) from llama_index.core.tools import QueryEngineTool, ToolMetadata try: storage_context = StorageContext.from_defaults( persist_dir="./storage/march" ) march_index = load_index_from_storage(storage_context) storage_context = StorageContext.from_defaults( persist_dir="./storage/june" ) june_index =
load_index_from_storage(storage_context)
llama_index.core.load_index_from_storage
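The completion pairs StorageContext.from_defaults with load_index_from_storage. A hedged round-trip sketch, assuming the 10-Q file referenced in the record has been downloaded (directory names are illustrative):

from llama_index.core import (
    SimpleDirectoryReader,
    VectorStoreIndex,
    StorageContext,
    load_index_from_storage,
)

# Build once and persist to disk...
docs = SimpleDirectoryReader(
    input_files=["./data/10q/uber_10q_march_2022.pdf"]
).load_data()
index = VectorStoreIndex.from_documents(docs)
index.storage_context.persist(persist_dir="./storage/march")

# ...then reload on later runs instead of re-embedding.
storage_context = StorageContext.from_defaults(persist_dir="./storage/march")
march_index = load_index_from_storage(storage_context)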
get_ipython().run_line_magic('pip', 'install llama-index-llms-anthropic') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.openai import OpenAI from llama_index.llms.anthropic import Anthropic llm = OpenAI() data = SimpleDirectoryReader(input_dir="./data/paul_graham/").load_data() index =
VectorStoreIndex.from_documents(data)
llama_index.core.VectorStoreIndex.from_documents
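Since the record imports both OpenAI and Anthropic, the likely continuation is querying the same index with either provider. A sketch under that assumption (the question text is illustrative):

# Sketch: pass the LLM at query-engine construction time to compare providers.
chatgpt_engine = index.as_query_engine(llm=OpenAI())
claude_engine = index.as_query_engine(llm=Anthropic())

print(chatgpt_engine.query("What did the author do growing up?"))
print(claude_engine.query("What did the author do growing up?"))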
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.agent.openai import OpenAIAgent from llama_index.llms.openai import OpenAI from llama_index.core.tools import BaseTool, FunctionTool def multiply(a: int, b: int) -> int: """Multiply two integers and returns the result integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and returns the result integer""" return a + b add_tool = FunctionTool.from_defaults(fn=add) llm = OpenAI(model="gpt-3.5-turbo-1106") agent = OpenAIAgent.from_tools( [multiply_tool, add_tool], llm=llm, verbose=True ) response = agent.chat("What is (121 * 3) + 42?") print(str(response)) response = agent.stream_chat("What is (121 * 3) + 42?") import nest_asyncio nest_asyncio.apply() response = await agent.achat("What is (121 * 3) + 42?") print(str(response)) response = await agent.astream_chat("What is (121 * 3) + 42?") response_gen = response.response_gen async for token in response.async_response_gen(): print(token, end="") import json def get_current_weather(location, unit="fahrenheit"): """Get the current weather in a given location""" if "tokyo" in location.lower(): return json.dumps( {"location": location, "temperature": "10", "unit": "celsius"} ) elif "san francisco" in location.lower(): return json.dumps( {"location": location, "temperature": "72", "unit": "fahrenheit"} ) else: return json.dumps( {"location": location, "temperature": "22", "unit": "celsius"} ) weather_tool = FunctionTool.from_defaults(fn=get_current_weather) llm = OpenAI(model="gpt-3.5-turbo-1106") agent =
OpenAIAgent.from_tools([weather_tool], llm=llm, verbose=True)
llama_index.agent.openai.OpenAIAgent.from_tools
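A short usage sketch for the completed agent; the weather question is hypothetical but exercises the registered get_current_weather tool from the record above.

agent = OpenAIAgent.from_tools([weather_tool], llm=llm, verbose=True)
response = agent.chat("What's the weather like in San Francisco?")
print(str(response))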
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm') get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git') get_ipython().run_line_magic('pip', 'install torch torchvision') get_ipython().run_line_magic('pip', 'install matplotlib scikit-image') get_ipython().run_line_magic('pip', 'install -U qdrant_client') from pathlib import Path import requests wiki_titles = [ "batman", "Vincent van Gogh", "San Francisco", "iPhone", "Tesla Model S", "BTS", ] data_path = Path("data_wiki") for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) import wikipedia import urllib.request image_path = Path("data_wiki") image_uuid = 0 image_metadata_dict = {} MAX_IMAGES_PER_WIKI = 30 wiki_titles = [ "San Francisco", "Batman", "Vincent van Gogh", "iPhone", "Tesla Model S", "BTS band", ] if not image_path.exists(): Path.mkdir(image_path) for title in wiki_titles: images_per_wiki = 0 print(title) try: page_py = wikipedia.page(title) list_img_urls = page_py.images for url in list_img_urls: if url.endswith(".jpg") or url.endswith(".png"): image_uuid += 1 image_file_name = title + "_" + url.split("/")[-1] image_metadata_dict[image_uuid] = { "filename": image_file_name, "img_path": "./" + str(image_path / f"{image_uuid}.jpg"), } urllib.request.urlretrieve( url, image_path / f"{image_uuid}.jpg" ) images_per_wiki += 1 if images_per_wiki >= MAX_IMAGES_PER_WIKI: break except Exception: print(f"No images found for Wikipedia page: {title}") continue import os os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" import qdrant_client from llama_index.core import SimpleDirectoryReader from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import VectorStoreIndex, StorageContext from llama_index.core.indices import MultiModalVectorStoreIndex client = qdrant_client.QdrantClient(path="qdrant_db") text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) documents = SimpleDirectoryReader("./data_wiki/").load_data() index = MultiModalVectorStoreIndex.from_documents( documents, storage_context=storage_context, ) from PIL import Image import matplotlib.pyplot as plt import os def plot_images(image_metadata_dict): original_images_urls = [] images_shown = 0 for image_id in image_metadata_dict: img_path = image_metadata_dict[image_id]["img_path"] if os.path.isfile(img_path): filename = image_metadata_dict[image_id]["filename"] image = Image.open(img_path).convert("RGB") plt.subplot(8, 8, len(original_images_urls) + 1) plt.imshow(image) plt.xticks([]) plt.yticks([]) original_images_urls.append(filename) images_shown += 1 if images_shown >= 64: break plt.tight_layout() plot_images(image_metadata_dict) def plot_images(image_paths): images_shown = 0 plt.figure(figsize=(16, 9)) for img_path in image_paths: if os.path.isfile(img_path): image = Image.open(img_path) plt.subplot(2, 3, images_shown + 1) plt.imshow(image) plt.xticks([])
plt.yticks([]) images_shown += 1 if images_shown >= 9: break test_query = "who are BTS team members" retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=5) retrieval_results = retriever.retrieve(test_query) from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.schema import ImageNode retrieved_image = [] for res_node in retrieval_results: if isinstance(res_node.node, ImageNode): retrieved_image.append(res_node.node.metadata["file_path"]) else:
display_source_node(res_node, source_length=200)
llama_index.core.response.notebook_utils.display_source_node
get_ipython().run_line_magic('pip', 'install llama-index-llms-konko') get_ipython().system('pip install llama-index') import os os.environ["KONKO_API_KEY"] = "<your-api-key>" from llama_index.llms.konko import Konko from llama_index.core.llms import ChatMessage llm = Konko(model="meta-llama/llama-2-13b-chat") messages = ChatMessage(role="user", content="Explain Big Bang Theory briefly") resp = llm.chat([messages]) print(resp) import os os.environ["OPENAI_API_KEY"] = "<your-api-key>" llm = Konko(model="gpt-3.5-turbo") message =
ChatMessage(role="user", content="Explain Big Bang Theory briefly")
llama_index.core.llms.ChatMessage
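The completed ChatMessage would then be sent through the Konko-backed gpt-3.5-turbo client; a minimal sketch, assuming llm from the record above:

message = ChatMessage(role="user", content="Explain Big Bang Theory briefly")
resp = llm.chat([message])
print(resp)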
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.query_pipeline import ( QueryPipeline as QP, Link, InputComponent, ) from llama_index.core.query_engine.pandas import PandasInstructionParser from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'") import pandas as pd df = pd.read_csv("./titanic_train.csv") instruction_str = ( "1. Convert the query to executable Python code using Pandas.\n" "2. The final line of code should be a Python expression that can be called with the `eval()` function.\n" "3. The code should represent a solution to the query.\n" "4. PRINT ONLY THE EXPRESSION.\n" "5. Do not quote the expression.\n" ) pandas_prompt_str = ( "You are working with a pandas dataframe in Python.\n" "The name of the dataframe is `df`.\n" "This is the result of `print(df.head())`:\n" "{df_str}\n\n" "Follow these instructions:\n" "{instruction_str}\n" "Query: {query_str}\n\n" "Expression:" ) response_synthesis_prompt_str = ( "Given an input question, synthesize a response from the query results.\n" "Query: {query_str}\n\n" "Pandas Instructions (optional):\n{pandas_instructions}\n\n" "Pandas Output: {pandas_output}\n\n" "Response: " ) pandas_prompt =
PromptTemplate(pandas_prompt_str)
llama_index.core.PromptTemplate
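In the query-pipeline pattern this record follows, the pandas prompt is usually partially formatted with its static fields before any wiring. A sketch assuming df and the prompt strings defined above (the model name is illustrative):

# Sketch: bake the static pieces into the prompt so only {query_str} remains.
pandas_prompt = PromptTemplate(pandas_prompt_str).partial_format(
    instruction_str=instruction_str, df_str=df.head(5)
)
pandas_output_parser = PandasInstructionParser(df)
response_synthesis_prompt = PromptTemplate(response_synthesis_prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")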
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() from llama_index.core import SimpleDirectoryReader, VectorStoreIndex from llama_index.core.response.pprint_utils import pprint_response from llama_index.llms.openai import OpenAI from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core.query_engine import SubQuestionQueryEngine import os os.environ["OPENAI_API_KEY"] = "OPENAI_API_KEY" from llama_index.core import Settings Settings.llm = OpenAI(temperature=0.2, model="gpt-3.5-turbo") get_ipython().system("mkdir -p 'data/10q/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_march_2022.pdf' -O 'data/10q/uber_10q_march_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_june_2022.pdf' -O 'data/10q/uber_10q_june_2022.pdf'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/10q/uber_10q_sept_2022.pdf' -O 'data/10q/uber_10q_sept_2022.pdf'") march_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_march_2022.pdf"] ).load_data() june_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_june_2022.pdf"] ).load_data() sept_2022 = SimpleDirectoryReader( input_files=["./data/10q/uber_10q_sept_2022.pdf"] ).load_data() march_index = VectorStoreIndex.from_documents(march_2022) june_index = VectorStoreIndex.from_documents(june_2022) sept_index =
VectorStoreIndex.from_documents(sept_2022)
llama_index.core.VectorStoreIndex.from_documents
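The three quarterly indexes typically feed a SubQuestionQueryEngine via QueryEngineTools; a hedged sketch (tool name, description, and question are illustrative, and the June/September tools would follow the same pattern):

query_engine_tools = [
    QueryEngineTool(
        query_engine=march_index.as_query_engine(),
        metadata=ToolMetadata(
            name="march_22",
            description="Provides information about Uber's quarter ending March 2022",
        ),
    ),
    # ...analogous tools for june_index and sept_index...
]
s_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)
response = s_engine.query("Analyze Uber revenue growth over the latest two quarters")
print(response)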
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().system('pip install "google-generativeai" -q') import nest_asyncio nest_asyncio.apply() from llama_index.core.llama_dataset import download_llama_dataset pairwise_evaluator_dataset, _ = download_llama_dataset( "MtBenchHumanJudgementDataset", "./mt_bench_data" ) pairwise_evaluator_dataset.to_pandas()[:5] from llama_index.core.evaluation import PairwiseComparisonEvaluator from llama_index.llms.openai import OpenAI from llama_index.llms.gemini import Gemini from llama_index.llms.cohere import Cohere llm_gpt4 =
OpenAI(temperature=0, model="gpt-4")
llama_index.llms.openai.OpenAI
get_ipython().run_line_magic('pip', 'install llama-index-readers-github') get_ipython().system('pip install llama-index') get_ipython().system('pip install nest_asyncio httpx') import nest_asyncio nest_asyncio.apply() get_ipython().run_line_magic('env', 'OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') from llama_index.core import VectorStoreIndex from llama_index.readers.github import GithubRepositoryReader from IPython.display import Markdown, display import os get_ipython().run_line_magic('env', 'GITHUB_TOKEN=github_pat_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') github_token = os.environ.get("GITHUB_TOKEN") owner = "jerryjliu" repo = "llama_index" branch = "main" documents = GithubRepositoryReader( github_token=github_token, owner=owner, repo=repo, use_parser=False, verbose=False, ignore_directories=["examples"], ).load_data(branch=branch) index =
VectorStoreIndex.from_documents(documents)
llama_index.core.VectorStoreIndex.from_documents
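Once the repository is indexed, querying follows the usual pattern; a short sketch (the question is illustrative):

query_engine = index.as_query_engine()
response = query_engine.query("What does load_index_from_storage do?")
display(Markdown(f"<b>{response}</b>"))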
get_ipython().run_line_magic('pip', 'install llama-index-llms-fireworks') get_ipython().run_line_magic('pip', 'install llama-index') from llama_index.llms.fireworks import Fireworks resp = Fireworks().complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.fireworks import Fireworks messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="What is your name"), ] resp =
Fireworks()
llama_index.llms.fireworks.Fireworks
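The truncated expression presumably continues into a chat call over the pirate messages; a minimal sketch:

resp = Fireworks().chat(messages)
print(resp)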
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import openai import os os.environ["OPENAI_API_KEY"] = "[Your API key]" get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp-free") pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True) from llama_index.core import StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core import VectorStoreIndex vector_store = PineconeVectorStore( pinecone_index=pinecone_index, namespace="wiki_cities" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) vector_index = VectorStoreIndex([], storage_context=storage_context) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///:memory:", future=True) metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, {"city_name": "Berlin", "population": 3645000, "country": "Germany"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) get_ipython().system('pip install wikipedia') from llama_index.readers.wikipedia import WikipediaReader cities = ["Toronto", "Berlin", "Tokyo"] wiki_docs = WikipediaReader().load_data(pages=cities) from llama_index.core import SQLDatabase sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from llama_index.core.query_engine import NLSQLTableQueryEngine sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], ) from llama_index.core import Settings for city, wiki_doc in zip(cities, wiki_docs): nodes = Settings.node_parser.get_nodes_from_documents([wiki_doc]) for node in nodes: node.metadata = {"title": city} vector_index.insert_nodes(nodes) from llama_index.llms.openai import OpenAI from llama_index.core.retrievers import VectorIndexAutoRetriever from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo from llama_index.core.query_engine import RetrieverQueryEngine vector_store_info = VectorStoreInfo( content_info="articles about different cities", metadata_info=[ MetadataInfo( name="title", type="str", description="The name of the city" ), ], ) vector_auto_retriever = VectorIndexAutoRetriever( vector_index, vector_store_info=vector_store_info ) retriever_query_engine = RetrieverQueryEngine.from_args( vector_auto_retriever, llm=
OpenAI(model="gpt-4")
llama_index.llms.openai.OpenAI
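With both a SQL engine and an auto-retrieving vector engine in hand, one plausible next step (not shown in the record) is combining them in a router such as SQLAutoVectorQueryEngine; a hedged sketch, with an illustrative question:

from llama_index.core.query_engine import SQLAutoVectorQueryEngine

# Sketch: route between the structured (SQL) and unstructured (vector) sources.
query_engine = SQLAutoVectorQueryEngine(sql_query_engine, retriever_query_engine)
response = query_engine.query(
    "Tell me about the arts and culture of the city with the highest population"
)
print(str(response))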
get_ipython().run_line_magic('pip', 'install llama-index-readers-github') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index llama-hub') import nest_asyncio nest_asyncio.apply() import os os.environ["GITHUB_TOKEN"] = "ghp_..." os.environ["OPENAI_API_KEY"] = "sk-..." import os from llama_index.readers.github import ( GitHubRepositoryIssuesReader, GitHubIssuesClient, ) github_client = GitHubIssuesClient() loader = GitHubRepositoryIssuesReader( github_client, owner="run-llama", repo="llama_index", verbose=True, ) orig_docs = loader.load_data() limit = 100 docs = [] for idx, doc in enumerate(orig_docs): doc.metadata["index_id"] = int(doc.id_) if idx >= limit: break docs.append(doc) import weaviate auth_config = weaviate.AuthApiKey( api_key="XRa15cDIkYRT7AkrpqT6jLfE4wropK1c1TGk" ) client = weaviate.Client( "https://llama-index-test-v0oggsoz.weaviate.network", auth_client_secret=auth_config, ) class_name = "LlamaIndex_docs" client.schema.delete_class(class_name) from llama_index.vector_stores.weaviate import WeaviateVectorStore from llama_index.core import VectorStoreIndex, StorageContext vector_store = WeaviateVectorStore( weaviate_client=client, index_name=class_name ) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
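Building the index over the capped issue set then follows the standard pattern; a sketch using names from the record (the question and top-k are illustrative):

index = VectorStoreIndex.from_documents(docs, storage_context=storage_context)
query_engine = index.as_query_engine(similarity_top_k=2)
response = query_engine.query("Tell me about some issues related to the query engine")
print(str(response))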
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.embeddings.huggingface import HuggingFaceEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0.1) Settings.embed_model =
HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
llama_index.embeddings.huggingface.HuggingFaceEmbedding
get_ipython().system('pip install llama-index') import openai import os os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" openai.api_key = os.environ["OPENAI_API_KEY"] from typing import Any, List from InstructorEmbedding import INSTRUCTOR from llama_index.core.bridge.pydantic import PrivateAttr from llama_index.core.embeddings import BaseEmbedding class InstructorEmbeddings(BaseEmbedding): _model: INSTRUCTOR = PrivateAttr() _instruction: str = PrivateAttr() def __init__( self, instructor_model_name: str = "hkunlp/instructor-large", instruction: str = "Represent a document for semantic search:", **kwargs: Any, ) -> None: self._model = INSTRUCTOR(instructor_model_name) self._instruction = instruction super().__init__(**kwargs) @classmethod def class_name(cls) -> str: return "instructor" async def _aget_query_embedding(self, query: str) -> List[float]: return self._get_query_embedding(query) async def _aget_text_embedding(self, text: str) -> List[float]: return self._get_text_embedding(text) def _get_query_embedding(self, query: str) -> List[float]: embeddings = self._model.encode([[self._instruction, query]]) return embeddings[0] def _get_text_embedding(self, text: str) -> List[float]: embeddings = self._model.encode([[self._instruction, text]]) return embeddings[0] def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]: embeddings = self._model.encode( [[self._instruction, text] for text in texts] ) return embeddings from llama_index.core import SimpleDirectoryReader, VectorStoreIndex from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
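A sketch of how the custom InstructorEmbeddings class plugs into the loaded documents; the batch size, chunk size, and question are illustrative:

documents = SimpleDirectoryReader("./data/paul_graham/").load_data()

# Route all embedding calls through the custom class defined above.
Settings.embed_model = InstructorEmbeddings(embed_batch_size=2)
Settings.chunk_size = 512

index = VectorStoreIndex.from_documents(documents)
response = index.as_query_engine().query("What did the author do growing up?")
print(response)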
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. 
""" if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core.agent import ReActChatFormatter from llama_index.core.query_pipeline import InputComponent, Link from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool def react_prompt_fn( task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool] ) -> List[ChatMessage]: chat_formatter = ReActChatFormatter() return chat_formatter.format( tools, chat_history=task.memory.get() + state["memory"].get_all(), current_reasoning=state["current_reasoning"], ) react_prompt_component = AgentFnComponent( fn=react_prompt_fn, partial_dict={"tools": [sql_tool]} ) from typing import Set, Optional from llama_index.core.agent.react.output_parser import ReActOutputParser from llama_index.core.llms import ChatResponse from llama_index.core.agent.types import Task def parse_react_output_fn( task: Task, state: Dict[str, Any], chat_response: ChatResponse ): """Parse ReAct output into a reasoning step.""" output_parser = ReActOutputParser() reasoning_step = output_parser.parse(chat_response.message.content) return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step} parse_react_output = AgentFnComponent(fn=parse_react_output_fn) def run_tool_fn( task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep ): """Run tool and process tool output.""" tool_runner_component = ToolRunnerComponent( [sql_tool], callback_manager=task.callback_manager ) tool_output = tool_runner_component.run_component( tool_name=reasoning_step.action, tool_input=reasoning_step.action_input, ) observation_step = ObservationReasoningStep(observation=str(tool_output)) state["current_reasoning"].append(observation_step) return {"response_str": observation_step.get_content(), "is_done": False} run_tool = AgentFnComponent(fn=run_tool_fn) def process_response_fn( task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep ): """Process response.""" state["current_reasoning"].append(response_step) response_str = response_step.response state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER)) state["memory"].put( ChatMessage(content=response_str, role=MessageRole.ASSISTANT) ) return {"response_str": response_str, "is_done": True} process_response = AgentFnComponent(fn=process_response_fn) def process_agent_response_fn( task: Task, state: Dict[str, Any], response_dict: dict ): """Process agent response.""" return ( AgentChatResponse(response_dict["response_str"]), response_dict["is_done"], ) process_agent_response = AgentFnComponent(fn=process_agent_response_fn) from llama_index.core.query_pipeline import QueryPipeline as QP from llama_index.llms.openai import OpenAI qp.add_modules( { "agent_input": agent_input_component, "react_prompt": react_prompt_component, "llm": OpenAI(model="gpt-4-1106-preview"), "react_output_parser": parse_react_output, "run_tool": run_tool, "process_response": process_response, "process_agent_response": process_agent_response, } ) qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"]) qp.add_link( "react_output_parser", "run_tool", condition_fn=lambda x: not x["done"], input_fn=lambda x: x["reasoning_step"], ) qp.add_link( "react_output_parser", "process_response", condition_fn=lambda x: x["done"], input_fn=lambda x: 
x["reasoning_step"], ) qp.add_link("process_response", "process_agent_response") qp.add_link("run_tool", "process_agent_response") from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.clean_dag) net.show("agent_dag.html") from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner from llama_index.core.callbacks import CallbackManager agent_worker = QueryPipelineAgentWorker(qp) agent = AgentRunner( agent_worker, callback_manager=CallbackManager([]), verbose=True ) task = agent.create_task( "What are some tracks from the artist AC/DC? Limit it to 3" ) step_output = agent.run_step(task.task_id) step_output = agent.run_step(task.task_id) step_output.is_last response = agent.finalize_response(task.task_id) print(str(response)) agent.reset() response = agent.chat( "What are some tracks from the artist AC/DC? Limit it to 3" ) print(str(response)) from llama_index.llms.openai import OpenAI llm =
OpenAI(model="gpt-4-1106-preview")
llama_index.llms.openai.OpenAI
get_ipython().system('pip install llama-index') get_ipython().system('pip install llama-index-core') get_ipython().system('pip install --quiet transformers torch') get_ipython().system('pip install llama-index-embeddings-openai') get_ipython().system('pip install llama-index-llms-openai') get_ipython().system('pip install llama-index-postprocessor-colbert-rerank') from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, ) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import os os.environ["OPENAI_API_KEY"] = "sk-" documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index =
VectorStoreIndex.from_documents(documents=documents)
llama_index.core.VectorStoreIndex.from_documents
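The installed llama-index-postprocessor-colbert-rerank package suggests the index is then paired with a ColBERT reranker; a hedged sketch (top-k values and question are illustrative):

from llama_index.postprocessor.colbert_rerank import ColbertRerank

# Sketch: over-retrieve with the vector index, then let ColBERT re-order.
colbert_reranker = ColbertRerank(top_n=5)
query_engine = index.as_query_engine(
    similarity_top_k=10,
    node_postprocessors=[colbert_reranker],
)
response = query_engine.query("What did Paul Graham do growing up?")
print(str(response))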
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().system('pip install llama-index gradientai -q') import os from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.finetuning import GradientFinetuneEngine os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY") os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>" from pydantic import BaseModel class Album(BaseModel): """Data model for an album.""" name: str artist: str from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler from llama_index.llms.openai import OpenAI from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser openai_handler = LlamaDebugHandler() openai_callback = CallbackManager([openai_handler]) openai_llm = OpenAI(model="gpt-4", callback_manager=openai_callback) gradient_handler = LlamaDebugHandler() gradient_callback = CallbackManager([gradient_handler]) base_model_slug = "llama2-7b-chat" gradient_llm = GradientBaseModelLLM( base_model_slug=base_model_slug, max_tokens=300, callback_manager=gradient_callback, is_chat_model=True, ) from llama_index.core.llms import LLMMetadata prompt_template_str = """\ Generate an example album, with an artist and a list of songs. \ Using the movie {movie_name} as inspiration.\ """ openai_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=prompt_template_str, llm=openai_llm, verbose=True, ) gradient_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=prompt_template_str, llm=gradient_llm, verbose=True, ) response = openai_program(movie_name="The Shining") print(str(response)) tmp = openai_handler.get_llm_inputs_outputs() print(tmp[0][0].payload["messages"][0]) response = gradient_program(movie_name="The Shining") print(str(response)) tmp = gradient_handler.get_llm_inputs_outputs() print(tmp[0][0].payload["messages"][0]) from llama_index.core.program import LLMTextCompletionProgram from pydantic import BaseModel from llama_index.llms.openai import OpenAI from llama_index.core.callbacks import GradientAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from llama_index.core.output_parsers import PydanticOutputParser from typing import List class Song(BaseModel): """Data model for a song.""" title: str length_seconds: int class Album(BaseModel): """Data model for an album.""" name: str artist: str songs: List[Song] finetuning_handler = GradientAIFineTuningHandler() callback_manager = CallbackManager([finetuning_handler]) llm_gpt4 = OpenAI(model="gpt-4", callback_manager=callback_manager) prompt_template_str = """\ Generate an example album, with an artist and a list of songs. 
\ Using the movie {movie_name} as inspiration.\ """ openai_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=prompt_template_str, llm=llm_gpt4, verbose=True, ) movie_names = [ "The Shining", "The Departed", "Titanic", "Goodfellas", "Pretty Woman", "Home Alone", "Caged Fury", "Edward Scissorhands", "Total Recall", "Ghost", "Tremors", "RoboCop", "Rocky V", ] from tqdm.notebook import tqdm for movie_name in tqdm(movie_names): output = openai_program(movie_name=movie_name) print(output.json()) events = finetuning_handler.get_finetuning_events() events finetuning_handler.save_finetuning_events("mock_finetune_songs.jsonl") get_ipython().system('cat mock_finetune_songs.jsonl') base_model_slug = "llama2-7b-chat" base_llm = GradientBaseModelLLM( base_model_slug=base_model_slug, max_tokens=500, is_chat_model=True ) from llama_index.finetuning import GradientFinetuneEngine finetune_engine = GradientFinetuneEngine( base_model_slug=base_model_slug, name="movies_structured", data_path="mock_finetune_songs.jsonl", verbose=True, max_steps=200, batch_size=1, ) finetune_engine.model_adapter_id epochs = 2 for i in range(epochs): print(f"** EPOCH {i} **") finetune_engine.finetune() ft_llm = finetune_engine.get_finetuned_model( max_tokens=500, is_chat_model=True ) from llama_index.llms.gradient import GradientModelAdapterLLM new_prompt_template_str = """\ Generate an example album, with an artist and a list of songs. \ Using the movie {movie_name} as inspiration.\ Please only generate one album. """ gradient_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=new_prompt_template_str, llm=ft_llm, verbose=True, ) gradient_program(movie_name="Goodfellas") gradient_program(movie_name="Chucky") base_gradient_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(Album), prompt_template_str=prompt_template_str, llm=base_llm, verbose=True, ) base_gradient_program(movie_name="Goodfellas") get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pydantic import Field from typing import List class Citation(BaseModel): """Citation class.""" author: str = Field( ..., description="Inferred first author (usually last name)" ) year: int = Field(..., description="Inferred year") desc: str = Field( ..., description=( "Inferred description from the text of the work that the author is" " cited for" ), ) class Response(BaseModel): """List of author citations. Extracted over unstructured text. """ citations: List[Citation] = Field( ..., description=( "List of author citations (organized by author, year, and" " description)."
), ) from llama_index.readers.file import PyMuPDFReader from llama_index.core import Document from llama_index.core.node_parser import SimpleNodeParser from pathlib import Path from llama_index.core.callbacks import GradientAIFineTuningHandler loader = PyMuPDFReader() docs0 = loader.load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) metadata = { "paper_title": "Llama 2: Open Foundation and Fine-Tuned Chat Models" } docs = [Document(text=doc_text, metadata=metadata)] chunk_size = 1024 node_parser = SimpleNodeParser.from_defaults(chunk_size=chunk_size) nodes = node_parser.get_nodes_from_documents(docs) len(nodes) finetuning_handler = GradientAIFineTuningHandler() callback_manager = CallbackManager([finetuning_handler]) llm_gpt4 = OpenAI(model="gpt-4-0613", temperature=0.3) llm_gpt4.pydantic_program_mode = "llm" base_model_slug = "llama2-7b-chat" base_llm = GradientBaseModelLLM( base_model_slug=base_model_slug, max_tokens=500, is_chat_model=True ) base_llm.pydantic_program_mode = "llm" eval_llm = OpenAI(model="gpt-4-0613", temperature=0) from llama_index.core.evaluation import DatasetGenerator from llama_index.core import SummaryIndex from llama_index.core import PromptTemplate from tqdm.notebook import tqdm from tqdm.asyncio import tqdm_asyncio fp = open("data/qa_pairs.jsonl", "w") question_gen_prompt = PromptTemplate( """ {query_str} Context: {context_str} Questions: """ ) question_gen_query = """\ Snippets from a research paper are given below. They contain citations. Please generate questions from the text asking about these citations. For instance, here are some sample questions: Which citations correspond to related works on transformer models? Tell me about authors that worked on advancing RLHF. Can you tell me citations corresponding to all computer vision works?
\ """ qr_pairs = [] node_questions_tasks = [] for idx, node in enumerate(nodes[:39]): num_questions = 1 # change this number to increase number of nodes dataset_generator = DatasetGenerator( [node], question_gen_query=question_gen_query, text_question_template=question_gen_prompt, llm=eval_llm, metadata_mode="all", num_questions_per_chunk=num_questions, ) task = dataset_generator.agenerate_questions_from_nodes(num=num_questions) node_questions_tasks.append(task) node_questions_lists = await tqdm_asyncio.gather(*node_questions_tasks) len(node_questions_lists) node_questions_lists[1] import pickle pickle.dump(node_questions_lists, open("llama2_questions.pkl", "wb")) node_questions_lists = pickle.load(open("llama2_questions.pkl", "rb")) from llama_index.core import VectorStoreIndex gpt4_index = VectorStoreIndex(nodes[:39], callback_manager=callback_manager) gpt4_query_engine = gpt4_index.as_query_engine( output_cls=Response, llm=llm_gpt4, similarity_top_k=1 ) from json import JSONDecodeError for idx, node in enumerate(tqdm(nodes[:39])): node_questions_0 = node_questions_lists[idx] for question in node_questions_0: try: gpt4_query_engine.query(question) except Exception as e: print(f"Error for question {question}, {repr(e)}") pass finetuning_handler.save_finetuning_events("llama2_citation_events.jsonl") from llama_index.finetuning import GradientFinetuneEngine finetune_engine = GradientFinetuneEngine( base_model_slug=base_model_slug, name="llama2_structured", data_path="llama2_citation_events.jsonl", verbose=True, max_steps=200, batch_size=1, ) finetune_engine.model_adapter_id epochs = 2 for i in range(epochs): print(f"** EPOCH {i} **") finetune_engine.finetune() ft_llm = finetune_engine.get_finetuned_model(max_tokens=500) from llama_index.core import VectorStoreIndex vector_index = VectorStoreIndex(nodes) query_engine = vector_index.as_query_engine( output_cls=Response, llm=ft_llm, similarity_top_k=1 ) base_index =
VectorStoreIndex(nodes)
llama_index.core.VectorStoreIndex
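To compare the fine-tuned adapter against the base model, both indexes can be queried with the structured Response output, mirroring the record's pattern; a sketch (the question is illustrative):

base_query_engine = base_index.as_query_engine(
    output_cls=Response, llm=base_llm, similarity_top_k=1
)
question = "Which citations are given for works on scaling laws?"
print(str(query_engine.query(question)))       # fine-tuned adapter
print(str(base_query_engine.query(question)))  # base llama2-7b-chat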
get_ipython().run_line_magic('pip', 'install llama-index-llms-portkey') get_ipython().system('pip install llama-index') get_ipython().system('pip install -U llama_index') get_ipython().system('pip install -U portkey-ai') from llama_index.llms.portkey import Portkey from llama_index.core.llms import ChatMessage import portkey as pk import os os.environ["PORTKEY_API_KEY"] = "PORTKEY_API_KEY" openai_virtual_key_a = "" openai_virtual_key_b = "" anthropic_virtual_key_a = "" anthropic_virtual_key_b = "" cohere_virtual_key_a = "" cohere_virtual_key_b = "" os.environ["OPENAI_API_KEY"] = "" os.environ["ANTHROPIC_API_KEY"] = "" portkey_client = Portkey( mode="single", ) openai_llm = pk.LLMOptions( provider="openai", model="gpt-4", virtual_key=openai_virtual_key_a, ) portkey_client.add_llms(openai_llm) messages = [
ChatMessage(role="system", content="You are a helpful assistant")
llama_index.core.llms.ChatMessage
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-azurecosmosmongo') get_ipython().run_line_magic('pip', 'install llama-index-llms-azure-openai') get_ipython().system('pip install llama-index') import os import json import openai from llama_index.llms.azure_openai import AzureOpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import VectorStoreIndex, SimpleDirectoryReader import os llm = AzureOpenAI( model_name=os.getenv("OPENAI_MODEL_COMPLETION"), deployment_name=os.getenv("OPENAI_MODEL_COMPLETION"), api_base=os.getenv("OPENAI_API_BASE"), api_key=os.getenv("OPENAI_API_KEY"), api_type=os.getenv("OPENAI_API_TYPE"), api_version=os.getenv("OPENAI_API_VERSION"), temperature=0, ) embed_model = OpenAIEmbedding( model=os.getenv("OPENAI_MODEL_EMBEDDING"), deployment_name=os.getenv("OPENAI_DEPLOYMENT_EMBEDDING"), api_base=os.getenv("OPENAI_API_BASE"), api_key=os.getenv("OPENAI_API_KEY"), api_type=os.getenv("OPENAI_API_TYPE"), api_version=os.getenv("OPENAI_API_VERSION"), ) from llama_index.core import Settings Settings.llm = llm Settings.embed_model = embed_model get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import datasets dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR") dataset from llama_index.core import get_tokenizer import re from typing import Set, List tokenizer = get_tokenizer() sample_size = 5 def get_reactions_row(raw_target: str) -> List[str]: """Get reactions from a single row.""" reaction_pattern = re.compile(r"reactions:\s*(.*)") reaction_match = reaction_pattern.search(raw_target) if reaction_match: reactions = reaction_match.group(1).split(",") reactions = [r.strip().lower() for r in reactions] else: reactions = [] return reactions def get_reactions_set(dataset) -> Set[str]: """Get set of all reactions.""" reactions = set() for data in dataset["train"]: reactions.update(set(get_reactions_row(data["target"]))) return reactions def get_samples(dataset, sample_size: int = 5): """Get processed sample. Contains source text and also the reaction label. Parse reaction text to specifically extract reactions. """ samples = [] for idx, data in enumerate(dataset["train"]): if idx >= sample_size: break text = data["fulltext_processed"] raw_target = data["target"] reactions = get_reactions_row(raw_target) samples.append({"text": text, "reactions": reactions}) return samples from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack from llama_index.core.llama_pack import download_llama_pack InferRetrieveRerankPack = download_llama_pack( "InferRetrieveRerankPack", "./irr_pack", ) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-16k") pred_context = """\ The output predictions should be a list of comma-separated adverse \ drug reactions. \ """ reranker_top_n = 10 pack = InferRetrieveRerankPack( get_reactions_set(dataset), llm=llm, pred_context=pred_context, reranker_top_n=reranker_top_n, verbose=True, ) samples = get_samples(dataset, sample_size=5) pred_reactions = pack.run(inputs=[s["text"] for s in samples]) gt_reactions = [s["reactions"] for s in samples] pred_reactions[2] gt_reactions[2] from llama_index.core.retrievers import BaseRetriever from llama_index.core.llms import LLM from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate from llama_index.core.query_pipeline import QueryPipeline from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank from llama_index.core.output_parsers import ChainableOutputParser from typing import List import random all_reactions = get_reactions_set(dataset) random.sample(list(all_reactions), 5) from llama_index.core.schema import TextNode from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core.ingestion import IngestionPipeline from llama_index.core import VectorStoreIndex reaction_nodes = [TextNode(text=r) for r in all_reactions] pipeline = IngestionPipeline(transformations=[
OpenAIEmbedding()
llama_index.embeddings.openai.OpenAIEmbedding
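A sketch of finishing the ingestion pipeline and turning the embedded reaction labels into a retriever; the top-k and the query string are illustrative:

pipeline = IngestionPipeline(transformations=[OpenAIEmbedding()])
reaction_nodes = pipeline.run(nodes=reaction_nodes)

index = VectorStoreIndex(reaction_nodes)
reaction_retriever = index.as_retriever(similarity_top_k=2)
nodes = reaction_retriever.retrieve("abdominal pain")
print([n.get_content() for n in nodes])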
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-cohere') get_ipython().system('pip install llama-index') import os cohere_api_key = "YOUR_API_KEY" os.environ["COHERE_API_KEY"] = cohere_api_key from llama_index.embeddings.cohere import CohereEmbedding embed_model = CohereEmbedding( cohere_api_key=cohere_api_key, model_name="embed-english-v3.0", input_type="search_query", ) embeddings = embed_model.get_text_embedding("Hello CohereAI!") print(len(embeddings)) print(embeddings[:5]) embed_model = CohereEmbedding( cohere_api_key=cohere_api_key, model_name="embed-english-v3.0", input_type="search_document", ) embeddings = embed_model.get_text_embedding("Hello CohereAI!") print(len(embeddings)) print(embeddings[:5]) embed_model = CohereEmbedding( cohere_api_key=cohere_api_key, model_name="embed-english-v2.0" ) embeddings = embed_model.get_text_embedding("Hello CohereAI!") print(len(embeddings)) print(embeddings[:5]) import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.llms.litellm import LiteLLM from llama_index.core.response.notebook_utils import display_source_node from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
from llama_index.core import VectorStoreIndex from llama_index.core.objects import ObjectIndex, SimpleObjectNodeMapping obj1 = {"input": "Hey, how's it going"} obj2 = ["a", "b", "c", "d"] obj3 = "llamaindex is an awesome library!" arbitrary_objects = [obj1, obj2, obj3] obj_node_mapping = SimpleObjectNodeMapping.from_objects(arbitrary_objects) nodes = obj_node_mapping.to_nodes(arbitrary_objects) object_index = ObjectIndex( index=VectorStoreIndex(nodes=nodes), object_node_mapping=obj_node_mapping ) object_retriever = object_index.as_retriever(similarity_top_k=1) object_retriever.retrieve("llamaindex") object_index.persist() reloaded_object_index = ObjectIndex.from_persist_dir() reloaded_object_index._object_node_mapping.obj_node_mapping object_index._object_node_mapping.obj_node_mapping from llama_index.core.tools import FunctionTool from llama_index.core import SummaryIndex from llama_index.core.objects import SimpleToolNodeMapping def add(a: int, b: int) -> int: """Add two integers and returns the result integer""" return a + b def multiply(a: int, b: int) -> int: """Multiply two integers and returns the result integer""" return a * b multiply_tool =
FunctionTool.from_defaults(fn=multiply)
llama_index.core.tools.FunctionTool.from_defaults
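The tool objects then typically go through a tool-specific node mapping into an ObjectIndex, mirroring the arbitrary-object flow above; a sketch (the retrieval query is illustrative):

add_tool = FunctionTool.from_defaults(fn=add)

tool_mapping = SimpleToolNodeMapping.from_objects([add_tool, multiply_tool])
object_index = ObjectIndex.from_objects(
    [add_tool, multiply_tool], tool_mapping, VectorStoreIndex
)
tool_retriever = object_index.as_retriever(similarity_top_k=1)
print(tool_retriever.retrieve("multiply two numbers"))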
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm') get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git') get_ipython().run_line_magic('pip', 'install torch torchvision') get_ipython().run_line_magic('pip', 'install matplotlib scikit-image') get_ipython().run_line_magic('pip', 'install -U qdrant_client') from pathlib import Path import requests wiki_titles = [ "batman", "Vincent van Gogh", "San Francisco", "iPhone", "Tesla Model S", "BTS", ] data_path = Path("data_wiki") for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) import wikipedia import urllib.request image_path = Path("data_wiki") image_uuid = 0 image_metadata_dict = {} MAX_IMAGES_PER_WIKI = 30 wiki_titles = [ "San Francisco", "Batman", "Vincent van Gogh", "iPhone", "Tesla Model S", "BTS band", ] if not image_path.exists(): Path.mkdir(image_path) for title in wiki_titles: images_per_wiki = 0 print(title) try: page_py = wikipedia.page(title) list_img_urls = page_py.images for url in list_img_urls: if url.endswith(".jpg") or url.endswith(".png"): image_uuid += 1 image_file_name = title + "_" + url.split("/")[-1] image_metadata_dict[image_uuid] = { "filename": image_file_name, "img_path": "./" + str(image_path / f"{image_uuid}.jpg"), } urllib.request.urlretrieve( url, image_path / f"{image_uuid}.jpg" ) images_per_wiki += 1 if images_per_wiki >= MAX_IMAGES_PER_WIKI: break except Exception: print(f"No images found for Wikipedia page: {title}") continue import os os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" import qdrant_client from llama_index.core import SimpleDirectoryReader from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import VectorStoreIndex, StorageContext from llama_index.core.indices import MultiModalVectorStoreIndex client = qdrant_client.QdrantClient(path="qdrant_db") text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) documents = SimpleDirectoryReader("./data_wiki/").load_data() index = MultiModalVectorStoreIndex.from_documents( documents, storage_context=storage_context, ) from PIL import Image import matplotlib.pyplot as plt import os def plot_images(image_metadata_dict): original_images_urls = [] images_shown = 0 for image_id in image_metadata_dict: img_path = image_metadata_dict[image_id]["img_path"] if os.path.isfile(img_path): filename = image_metadata_dict[image_id]["filename"] image = Image.open(img_path).convert("RGB") plt.subplot(8, 8, len(original_images_urls) + 1) plt.imshow(image) plt.xticks([]) plt.yticks([]) original_images_urls.append(filename) images_shown += 1 if images_shown >= 64: break plt.tight_layout() plot_images(image_metadata_dict) def plot_images(image_paths): images_shown = 0 plt.figure(figsize=(16, 9)) for img_path in image_paths: if os.path.isfile(img_path): image = Image.open(img_path) plt.subplot(2, 3, images_shown + 1) plt.imshow(image) plt.xticks([])
plt.yticks([]) images_shown += 1 if images_shown >= 9: break test_query = "who are BTS team members" retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=5) retrieval_results = retriever.retrieve(test_query) from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.schema import ImageNode retrieved_image = [] for res_node in retrieval_results: if isinstance(res_node.node, ImageNode): retrieved_image.append(res_node.node.metadata["file_path"]) else: display_source_node(res_node, source_length=200) plot_images(retrieved_image) test_query = "what are Vincent van Gogh's famous paintings" retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=5) retrieval_results = retriever.retrieve(test_query) retrieved_image = [] for res_node in retrieval_results: if isinstance(res_node.node, ImageNode): retrieved_image.append(res_node.node.metadata["file_path"]) else: display_source_node(res_node, source_length=200) plot_images(retrieved_image) test_query = "what is the popular tourist attraction in San Francisco" retriever = index.as_retriever(similarity_top_k=3, image_similarity_top_k=5) retrieval_results = retriever.retrieve(test_query) retrieved_image = [] for res_node in retrieval_results: if isinstance(res_node.node, ImageNode): retrieved_image.append(res_node.node.metadata["file_path"]) else:
display_source_node(res_node, source_length=200)
llama_index.core.response.notebook_utils.display_source_node