Dataset columns: prompt (string, 43 to 25.9k chars), completion (string, 7 to 362 chars), api (string, 18 to 90 chars). Each row is a prompt (code that stops just before an API call), the completion that finishes the call, and the fully qualified api name.
from langchain.callbacks.manager import CallbackManager from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.prompts import PromptTemplate from langchain_community.llms import TitanTakeoffPro llm =
TitanTakeoffPro()
langchain_community.llms.TitanTakeoffPro
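A minimal sketch of how the completed call could be wired for streaming, using only the imports already in the row; the `streaming`/`callback_manager` kwargs follow the usual Takeoff pattern and assume a Takeoff server running locally:

```python
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.prompts import PromptTemplate
from langchain_community.llms import TitanTakeoffPro

# Stream generated tokens to stdout as they arrive from the local Takeoff server.
llm = TitanTakeoffPro(
    streaming=True,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
prompt = PromptTemplate.from_template("Tell me about {topic}")
print(llm.invoke(prompt.format(topic="the universe")))
```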
URL = "" # Your Fiddler instance URL, Make sure to include the full URL (including https://). For example: https://demo.fiddler.ai ORG_NAME = "" AUTH_TOKEN = "" # Your Fiddler instance auth token PROJECT_NAME = "" MODEL_NAME = "" # Model name in Fiddler from langchain_community.callbacks.fiddler_callback import FiddlerCallbackHandler fiddler_handler = FiddlerCallbackHandler( url=URL, org=ORG_NAME, project=PROJECT_NAME, model=MODEL_NAME, api_key=AUTH_TOKEN, ) from langchain_core.output_parsers import StrOutputParser from langchain_openai import OpenAI llm = OpenAI(temperature=0, streaming=True, callbacks=[fiddler_handler]) output_parser = StrOutputParser() chain = llm | output_parser chain.invoke("How far is moon from earth?") chain.invoke("What is the temperature on Mars?") chain.invoke("How much is 2 + 200000?") chain.invoke("Which movie won the oscars this year?") chain.invoke("Can you write me a poem about insomnia?") chain.invoke("How are you doing today?") chain.invoke("What is the meaning of life?") from langchain.prompts import ( ChatPromptTemplate, FewShotChatMessagePromptTemplate, ) examples = [ {"input": "2+2", "output": "4"}, {"input": "2+3", "output": "5"}, ] example_prompt =
ChatPromptTemplate.from_messages( [ ("human", "{input}")
langchain.prompts.ChatPromptTemplate.from_messages
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain_community.chat_models import ChatAnthropic from langchain_openai import ChatOpenAI from unittest.mock import patch import httpx from openai import RateLimitError request = httpx.Request("GET", "/") response = httpx.Response(200, request=request) error = RateLimitError("rate limit", response=response, body="") openai_llm = ChatOpenAI(max_retries=0) anthropic_llm = ChatAnthropic() llm = openai_llm.with_fallbacks([anthropic_llm]) with patch("openai.resources.chat.completions.Completions.create", side_effect=error): try: print(openai_llm.invoke("Why did the chicken cross the road?")) except RateLimitError: print("Hit error") with patch("openai.resources.chat.completions.Completions.create", side_effect=error): try: print(llm.invoke("Why did the chicken cross the road?")) except RateLimitError: print("Hit error") from langchain_core.prompts import ChatPromptTemplate prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're a nice assistant who always includes a compliment in your response", ), ("human", "Why did the {animal} cross the road"), ] ) chain = prompt | llm with patch("openai.resources.chat.completions.Completions.create", side_effect=error): try: print(chain.invoke({"animal": "kangaroo"})) except RateLimitError: print("Hit error") from langchain_core.output_parsers import StrOutputParser chat_prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're a nice assistant who always includes a compliment in your response", ), ("human", "Why did the {animal} cross the road"), ] ) chat_model = ChatOpenAI(model_name="gpt-fake") bad_chain = chat_prompt | chat_model | StrOutputParser() from langchain.prompts import PromptTemplate from langchain_openai import OpenAI prompt_template = """Instructions: You should always include a compliment in your response. Question: Why did the {animal} cross the road?""" prompt = PromptTemplate.from_template(prompt_template) llm = OpenAI() good_chain = prompt | llm chain = bad_chain.with_fallbacks([good_chain]) chain.invoke({"animal": "turtle"}) short_llm = ChatOpenAI() long_llm = ChatOpenAI(model="gpt-3.5-turbo-16k") llm = short_llm.with_fallbacks([long_llm]) inputs = "What is the next number: " + ", ".join(["one", "two"] * 3000) try: print(short_llm.invoke(inputs)) except Exception as e: print(e) try: print(llm.invoke(inputs)) except Exception as e: print(e) from langchain.output_parsers import DatetimeOutputParser prompt = ChatPromptTemplate.from_template( "what time was {event} (in %Y-%m-%dT%H:%M:%S.%fZ format - only return this value)" ) openai_35 =
ChatOpenAI()
langchain_openai.ChatOpenAI
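A runnable sketch of the fallback pattern this row exercises, assuming both OPENAI_API_KEY and ANTHROPIC_API_KEY are set; `max_retries=0` makes the primary model fail over immediately instead of retrying:

```python
from langchain_community.chat_models import ChatAnthropic
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# If the OpenAI call raises (e.g. a rate limit), the Anthropic model answers instead.
llm = ChatOpenAI(max_retries=0).with_fallbacks([ChatAnthropic()])

prompt = ChatPromptTemplate.from_template("Why did the {animal} cross the road?")
chain = prompt | llm
print(chain.invoke({"animal": "turtle"}))
```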
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langkit langchain-openai langchain') from langchain.callbacks import WhyLabsCallbackHandler from langchain_openai import OpenAI whylabs = WhyLabsCallbackHandler.from_params() llm =
OpenAI(temperature=0, callbacks=[whylabs])
langchain_openai.OpenAI
from langchain.document_loaders.csv_loader import CSVLoader loader =
CSVLoader("data/corp_sens_data.csv")
langchain.document_loaders.csv_loader.CSVLoader
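A short sketch of what the completed loader yields; the CSV path is the row's own (hypothetical) file, and each CSV row becomes one Document:

```python
from langchain.document_loaders.csv_loader import CSVLoader

loader = CSVLoader("data/corp_sens_data.csv")
docs = loader.load()
# Each Document carries the row's columns as text plus source/row metadata.
print(docs[0].page_content)
print(docs[0].metadata)
```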
from getpass import getpass KAY_API_KEY = getpass() OPENAI_API_KEY = getpass() import os os.environ["KAY_API_KEY"] = KAY_API_KEY os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain.chains import ConversationalRetrievalChain from langchain.retrievers import KayAiRetriever from langchain_openai import ChatOpenAI model = ChatOpenAI(model_name="gpt-3.5-turbo") retriever = KayAiRetriever.create( dataset_id="company", data_types=["PressRelease"], num_contexts=6 ) qa =
ConversationalRetrievalChain.from_llm(model, retriever=retriever)
langchain.chains.ConversationalRetrievalChain.from_llm
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-robocorp') from langchain.agents import AgentExecutor, OpenAIFunctionsAgent from langchain_core.messages import SystemMessage from langchain_openai import ChatOpenAI from langchain_robocorp import ActionServerToolkit llm =
ChatOpenAI(model="gpt-4", temperature=0)
langchain_openai.ChatOpenAI
from langchain_community.graphs import OntotextGraphDBGraph graph = OntotextGraphDBGraph( query_endpoint="http://localhost:7200/repositories/langchain", query_ontology="CONSTRUCT {?s ?p ?o} FROM <https://swapi.co/ontology/> WHERE {?s ?p ?o}", ) graph = OntotextGraphDBGraph( query_endpoint="http://localhost:7200/repositories/langchain", local_file="/path/to/langchain_graphdb_tutorial/starwars-ontology.nt", # change the path here ) import os from langchain.chains import OntotextGraphDBQAChain from langchain_openai import ChatOpenAI os.environ["OPENAI_API_KEY"] = "sk-***" chain = OntotextGraphDBQAChain.from_llm(
ChatOpenAI(temperature=0, model_name="gpt-4-1106-preview")
langchain_openai.ChatOpenAI
meals = [ "Beef Enchiladas with Feta cheese. Mexican-Greek fusion", "Chicken Flatbreads with red sauce. Italian-Mexican fusion", "Veggie sweet potato quesadillas with vegan cheese", "One-Pan Tortelonni bake with peppers and onions", ] from langchain_openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-instruct") from langchain.prompts import PromptTemplate PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}". Embed the meal into the given text: "{text_to_personalize}". Prepend a personalized message including the user's name "{user}" and their preference "{preference}". Make it sound good. """ PROMPT = PromptTemplate( input_variables=["meal", "text_to_personalize", "user", "preference"], template=PROMPT_TEMPLATE, ) import langchain_experimental.rl_chain as rl_chain chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=
rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"])
langchain_experimental.rl_chain.BasedOn
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai context-python') import os from langchain.callbacks import ContextCallbackHandler token = os.environ["CONTEXT_API_TOKEN"] context_callback = ContextCallbackHandler(token) import os from langchain.callbacks import ContextCallbackHandler from langchain.schema import ( HumanMessage, SystemMessage, ) from langchain_openai import ChatOpenAI token = os.environ["CONTEXT_API_TOKEN"] chat = ChatOpenAI( headers={"user_id": "123"}, temperature=0, callbacks=[ContextCallbackHandler(token)] ) messages = [ SystemMessage( content="You are a helpful assistant that translates English to French." ), HumanMessage(content="I love programming."), ] print(chat(messages)) import os from langchain.callbacks import ContextCallbackHandler from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, ) from langchain_openai import ChatOpenAI token = os.environ["CONTEXT_API_TOKEN"] human_message_prompt = HumanMessagePromptTemplate( prompt=PromptTemplate( template="What is a good name for a company that makes {product}?", input_variables=["product"], ) ) chat_prompt_template = ChatPromptTemplate.from_messages([human_message_prompt]) callback =
ContextCallbackHandler(token)
langchain.callbacks.ContextCallbackHandler
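A plausible continuation of this row, assuming its `chat_prompt_template` and `callback` are still in scope; the temperature is illustrative:

```python
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI

# Attach the Context handler at both the model and the chain level.
chat = ChatOpenAI(temperature=0.9, callbacks=[callback])
chain = LLMChain(llm=chat, prompt=chat_prompt_template, callbacks=[callback])
print(chain.run("colorful socks"))
```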
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken langchain-openai python-dotenv datasets langchain deeplake beautifulsoup4 html2text ragas') ORG_ID = "..." import getpass import os from langchain.chains import RetrievalQA from langchain.vectorstores.deeplake import DeepLake from langchain_openai import OpenAIChat, OpenAIEmbeddings os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API token: ") os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass( "Enter your ActiveLoop API token: " ) # Get your API token from https://app.activeloop.ai, click on your profile picture in the top right corner, and select "API Tokens" token = os.getenv("ACTIVELOOP_TOKEN") openai_embeddings = OpenAIEmbeddings() db = DeepLake( dataset_path=f"hub://{ORG_ID}/deeplake-docs-deepmemory", # org_id stands for your username or organization from activeloop embedding=openai_embeddings, runtime={"tensor_db": True}, token=token, read_only=False, ) from urllib.parse import urljoin import requests from bs4 import BeautifulSoup def get_all_links(url): response = requests.get(url) if response.status_code != 200: print(f"Failed to retrieve the page: {url}") return [] soup = BeautifulSoup(response.content, "html.parser") links = [ urljoin(url, a["href"]) for a in soup.find_all("a", href=True) if a["href"] ] return links base_url = "https://docs.deeplake.ai/en/latest/" all_links = get_all_links(base_url) from langchain.document_loaders import AsyncHtmlLoader loader = AsyncHtmlLoader(all_links) docs = loader.load() from langchain.document_transformers import Html2TextTransformer html2text = Html2TextTransformer() docs_transformed = html2text.transform_documents(docs) from langchain_text_splitters import RecursiveCharacterTextSplitter chunk_size = 4096 docs_new = [] text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, ) for doc in docs_transformed: if len(doc.page_content) < chunk_size: docs_new.append(doc) else: docs = text_splitter.create_documents([doc.page_content]) docs_new.extend(docs) docs = db.add_documents(docs_new) from typing import List from langchain.chains.openai_functions import ( create_structured_output_chain, ) from langchain_core.messages import HumanMessage, SystemMessage from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate from langchain_openai import ChatOpenAI from pydantic import BaseModel, Field docs = db.vectorstore.dataset.text.data(fetch_chunks=True, aslist=True)["value"] ids = db.vectorstore.dataset.id.data(fetch_chunks=True, aslist=True)["value"] llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) class Questions(BaseModel): """Identifying information about a person.""" question: str = Field(..., description="Questions about text") prompt_msgs = [ SystemMessage( content="You are a world class expert for generating questions based on provided context. \ You make sure the question can be answered by the text." ), HumanMessagePromptTemplate.from_template( "Use the given text to generate a question from the following input: {input}" ), HumanMessage(content="Tips: Make sure to answer in the correct format"), ] prompt = ChatPromptTemplate(messages=prompt_msgs) chain = create_structured_output_chain(Questions, llm, prompt, verbose=True) text = "# Understanding Hallucinations and Bias ## **Introduction** In this lesson, we'll cover the concept of **hallucinations** in LLMs, highlighting their influence on AI applications and demonstrating how to mitigate them using techniques like the retriever's architectures. 
We'll also explore **bias** within LLMs with examples." questions = chain.run(input=text) print(questions) import random from langchain_openai import OpenAIEmbeddings from tqdm import tqdm def generate_queries(docs: List[str], ids: List[str], n: int = 100): questions = [] relevances = [] pbar = tqdm(total=n) while len(questions) < n: r = random.randint(0, len(docs) - 1) text, label = docs[r], ids[r] generated_qs = [chain.run(input=text).question] questions.extend(generated_qs) relevances.extend([[(label, 1)] for _ in generated_qs]) pbar.update(len(generated_qs)) if len(questions) % 10 == 0: print(f"q: {len(questions)}") return questions[:n], relevances[:n] chain = create_structured_output_chain(Questions, llm, prompt, verbose=False) questions, relevances = generate_queries(docs, ids, n=200) train_questions, train_relevances = questions[:100], relevances[:100] test_questions, test_relevances = questions[100:], relevances[100:] job_id = db.vectorstore.deep_memory.train( queries=train_questions, relevance=train_relevances, ) db.vectorstore.deep_memory.status("6538939ca0b69a9ca45c528c") recall = db.vectorstore.deep_memory.evaluate( queries=test_questions, relevance=test_relevances, ) from ragas.langchain import RagasEvaluatorChain from ragas.metrics import ( context_recall, ) def convert_relevance_to_ground_truth(docs, relevance): ground_truths = [] for rel in relevance: ground_truth = [] for doc_id, _ in rel: ground_truth.append(docs[doc_id]) ground_truths.append(ground_truth) return ground_truths ground_truths = convert_relevance_to_ground_truth(docs, test_relevances) for deep_memory in [False, True]: print("\nEvaluating with deep_memory =", deep_memory) print("===================================") retriever = db.as_retriever() retriever.search_kwargs["deep_memory"] = deep_memory qa_chain = RetrievalQA.from_chain_type( llm=OpenAIChat(model="gpt-3.5-turbo"), chain_type="stuff", retriever=retriever, return_source_documents=True, ) metrics = { "context_recall_score": 0, } eval_chains = {m.name: RagasEvaluatorChain(metric=m) for m in [context_recall]} for question, ground_truth in zip(test_questions, ground_truths): result = qa_chain({"query": question}) result["ground_truths"] = ground_truth for name, eval_chain in eval_chains.items(): score_name = f"{name}_score" metrics[score_name] += eval_chain(result)[score_name] for metric in metrics: metrics[metric] /= len(test_questions) print(f"{metric}: {metrics[metric]}") print("===================================") retriever = db.as_retriever() retriever.search_kwargs["deep_memory"] = True retriever.search_kwargs["k"] = 10 query = "Deamination of cytidine to uridine on the minus strand of viral DNA results in catastrophic G-to-A mutations in the viral genome." qa = RetrievalQA.from_chain_type( llm=
OpenAIChat(model="gpt-4")
langchain_openai.OpenAIChat
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core langchain-experimental langchain-openai') from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ( ChatPromptTemplate, ) from langchain_experimental.utilities import PythonREPL from langchain_openai import ChatOpenAI template = """Write some python code to solve the user's problem. Return only python code in Markdown format, e.g.: ```python .... ```""" prompt = ChatPromptTemplate.from_messages([("system", template), ("human", "{input}")]) model = ChatOpenAI() def _sanitize_output(text: str): _, after = text.split("```python") return after.split("```")[0] chain = prompt | model |
StrOutputParser()
langchain_core.output_parsers.StrOutputParser
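A self-contained sketch of the code-writing-then-executing chain this row builds, with the REPL appended as the final step; the prompt wording is abbreviated from the row:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_experimental.utilities import PythonREPL
from langchain_openai import ChatOpenAI

template = (
    "Write some python code to solve the user's problem. "
    "Return only python code in Markdown format."
)
prompt = ChatPromptTemplate.from_messages([("system", template), ("human", "{input}")])

def _sanitize_output(text: str) -> str:
    # Keep only the body of the fenced python block.
    _, after = text.split("```python")
    return after.split("```")[0]

# Model writes code, the parser yields a string, the REPL executes it.
chain = prompt | ChatOpenAI() | StrOutputParser() | _sanitize_output | PythonREPL().run
print(chain.invoke({"input": "whats 2 plus 2"}))
```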
get_ipython().run_line_magic('pip', 'install --upgrade --quiet dashvector dashscope') import getpass import os os.environ["DASHVECTOR_API_KEY"] = getpass.getpass("DashVector API Key:") os.environ["DASHSCOPE_API_KEY"] = getpass.getpass("DashScope API Key:") from langchain_community.embeddings.dashscope import DashScopeEmbeddings from langchain_community.vectorstores import DashVector from langchain_text_splitters import CharacterTextSplitter from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings =
DashScopeEmbeddings()
langchain_community.embeddings.dashscope.DashScopeEmbeddings
get_ipython().run_line_magic('pip', 'install --upgrade --quiet O365') get_ipython().run_line_magic('pip', 'install --upgrade --quiet beautifulsoup4 # This is optional but is useful for parsing HTML messages') from langchain_community.agent_toolkits import O365Toolkit toolkit =
O365Toolkit()
langchain_community.agent_toolkits.O365Toolkit
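A sketch of what typically comes next, assuming the Microsoft 365 app credentials the toolkit reads from the environment are configured:

```python
from langchain_community.agent_toolkits import O365Toolkit

toolkit = O365Toolkit()
tools = toolkit.get_tools()
# Office 365 tools for searching mail, sending messages, calendar events, etc.
print([tool.name for tool in tools])
```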
get_ipython().run_line_magic('pip', 'install --upgrade --quiet clearml') get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas') get_ipython().run_line_magic('pip', 'install --upgrade --quiet textstat') get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy') get_ipython().system('python -m spacy download en_core_web_sm') import os os.environ["CLEARML_API_ACCESS_KEY"] = "" os.environ["CLEARML_API_SECRET_KEY"] = "" os.environ["OPENAI_API_KEY"] = "" os.environ["SERPAPI_API_KEY"] = "" from langchain.callbacks import ClearMLCallbackHandler from langchain.callbacks import StdOutCallbackHandler from langchain_openai import OpenAI clearml_callback = ClearMLCallbackHandler( task_type="inference", project_name="langchain_callback_demo", task_name="llm", tags=["test"], visualize=True, complexity_metrics=True, stream_logs=True, ) callbacks = [StdOutCallbackHandler(), clearml_callback] llm = OpenAI(temperature=0, callbacks=callbacks) llm_result = llm.generate(["Tell me a joke", "Tell me a poem"] * 3) clearml_callback.flush_tracker(langchain_asset=llm, name="simple_sequential") from langchain.agents import AgentType, initialize_agent, load_tools tools =
load_tools(["serpapi", "llm-math"], llm=llm, callbacks=callbacks)
langchain.agents.load_tools
get_ipython().run_line_magic('pip', 'install --upgrade --quiet airbyte-source-hubspot') from langchain_community.document_loaders.airbyte import AirbyteHubspotLoader config = { } loader = AirbyteHubspotLoader( config=config, stream_name="products" ) # check the documentation linked above for a list of all streams docs = loader.load() docs_iterator = loader.lazy_load() from langchain.docstore.document import Document def handle_record(record, id): return
Document(page_content=record.data["title"], metadata=record.data)
langchain.docstore.document.Document
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)') get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch') path = "/Users/rlm/Desktop/cpi/" from langchain_community.document_loaders import PyPDFLoader loader = PyPDFLoader(path + "cpi.pdf") pdf_pages = loader.load() from langchain_text_splitters import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) all_splits_pypdf = text_splitter.split_documents(pdf_pages) all_splits_pypdf_texts = [d.page_content for d in all_splits_pypdf] from unstructured.partition.pdf import partition_pdf raw_pdf_elements = partition_pdf( filename=path + "cpi.pdf", extract_images_in_pdf=True, infer_table_structure=True, chunking_strategy="by_title", max_characters=4000, new_after_n_chars=3800, combine_text_under_n_chars=2000, image_output_dir_path=path, ) tables = [] texts = [] for element in raw_pdf_elements: if "unstructured.documents.elements.Table" in str(type(element)): tables.append(str(element)) elif "unstructured.documents.elements.CompositeElement" in str(type(element)): texts.append(str(element)) from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings baseline = Chroma.from_texts( texts=all_splits_pypdf_texts, collection_name="baseline", embedding=OpenAIEmbeddings(), ) retriever_baseline = baseline.as_retriever() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \ These summaries will be embedded and used to retrieve the raw text or table elements. \ Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """ prompt = ChatPromptTemplate.from_template(prompt_text) model = ChatOpenAI(temperature=0, model="gpt-4") summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5}) table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5}) import base64 import io import os from io import BytesIO from langchain_core.messages import HumanMessage from PIL import Image def encode_image(image_path): """Getting the base64 string""" with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") def image_summarize(img_base64, prompt): """Image summary""" chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024) msg = chat.invoke( [ HumanMessage( content=[ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}, }, ] ) ] ) return msg.content img_base64_list = [] image_summaries = [] prompt = """You are an assistant tasked with summarizing images for retrieval. \ These summaries will be embedded and used to retrieve the raw image. 
\ Give a concise summary of the image that is well optimized for retrieval.""" for img_file in sorted(os.listdir(path)): if img_file.endswith(".jpg"): img_path = os.path.join(path, img_file) base64_image = encode_image(img_path) img_base64_list.append(base64_image) image_summaries.append(image_summarize(base64_image, prompt)) import uuid from base64 import b64decode from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore from langchain_core.documents import Document def create_multi_vector_retriever( vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images ): store = InMemoryStore() id_key = "doc_id" retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key, ) def add_documents(retriever, doc_summaries, doc_contents): doc_ids = [str(uuid.uuid4()) for _ in doc_contents] summary_docs = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(doc_summaries) ] retriever.vectorstore.add_documents(summary_docs) retriever.docstore.mset(list(zip(doc_ids, doc_contents))) if text_summaries: add_documents(retriever, text_summaries, texts) if table_summaries: add_documents(retriever, table_summaries, tables) if image_summaries: add_documents(retriever, image_summaries, images) return retriever multi_vector_img = Chroma( collection_name="multi_vector_img", embedding_function=OpenAIEmbeddings() ) retriever_multi_vector_img = create_multi_vector_retriever( multi_vector_img, text_summaries, texts, table_summaries, tables, image_summaries, img_base64_list, ) query = "What percentage of CPI is dedicated to Housing, and how does it compare to the combined percentage of Medical Care, Apparel, and Other Goods and Services?" suffix_for_images = " Include any pie charts, graphs, or tables." docs = retriever_multi_vector_img.get_relevant_documents(query + suffix_for_images) from IPython.display import HTML, display def plt_img_base64(img_base64): image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />' display(HTML(image_html)) plt_img_base64(docs[1]) multi_vector_text = Chroma( collection_name="multi_vector_text", embedding_function=OpenAIEmbeddings() ) retriever_multi_vector_img_summary = create_multi_vector_retriever( multi_vector_text, text_summaries, texts, table_summaries, tables, image_summaries, image_summaries, ) from langchain_experimental.open_clip import OpenCLIPEmbeddings multimodal_embd = Chroma( collection_name="multimodal_embd", embedding_function=OpenCLIPEmbeddings() ) image_uris = sorted( [ os.path.join(path, image_name) for image_name in os.listdir(path) if image_name.endswith(".jpg") ] ) if image_uris: multimodal_embd.add_images(uris=image_uris) if texts: multimodal_embd.add_texts(texts=texts) if tables: multimodal_embd.add_texts(texts=tables) retriever_multimodal_embd = multimodal_embd.as_retriever() from operator import itemgetter from langchain_core.runnables import RunnablePassthrough template = """Answer the question based only on the following context, which can include text and tables: {context} Question: {question} """ rag_prompt_text =
ChatPromptTemplate.from_template(template)
langchain_core.prompts.ChatPromptTemplate.from_template
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub langchain-openai faiss-cpu') from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() db = FAISS.from_documents(texts, embeddings) retriever = db.as_retriever() from langchain.tools.retriever import create_retriever_tool tool = create_retriever_tool( retriever, "search_state_of_union", "Searches and returns excerpts from the 2022 State of the Union.", ) tools = [tool] from langchain import hub prompt =
hub.pull("hwchase17/openai-tools-agent")
langchain.hub.pull
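A hedged sketch of the agent this row is building toward, assuming the row's `tools` list and the pulled `prompt` are in scope:

```python
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(temperature=0)
agent = create_openai_tools_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
result = agent_executor.invoke(
    {"input": "What did the president say about Ketanji Brown Jackson?"}
)
print(result["output"])
```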
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain.model_laboratory import ModelLaboratory from langchain.prompts import PromptTemplate from langchain_community.llms import Cohere, HuggingFaceHub from langchain_openai import OpenAI import getpass import os os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:") os.environ["OPENAI_API_KEY"] = getpass.getpass("Open API Key:") os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass.getpass("Hugging Face API Key:") llms = [ OpenAI(temperature=0), Cohere(temperature=0), HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1}), ] model_lab = ModelLaboratory.from_llms(llms) model_lab.compare("What color is a flamingo?") prompt = PromptTemplate( template="What is the capital of {state}?", input_variables=["state"] ) model_lab_with_prompt = ModelLaboratory.from_llms(llms, prompt=prompt) model_lab_with_prompt.compare("New York") from langchain.chains import SelfAskWithSearchChain from langchain_community.utilities import SerpAPIWrapper open_ai_llm = OpenAI(temperature=0) search =
SerpAPIWrapper()
langchain_community.utilities.SerpAPIWrapper
get_ipython().run_line_magic('pip', 'install --upgrade --quiet neo4j') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from langchain.docstore.document import Document from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import Neo4jVector from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() url = "bolt://localhost:7687" username = "neo4j" password = "pleaseletmein" db = Neo4jVector.from_documents( docs, OpenAIEmbeddings(), url=url, username=username, password=password ) query = "What did the president say about Ketanji Brown Jackson" docs_with_score = db.similarity_search_with_score(query, k=2) for doc, score in docs_with_score: print("-" * 80) print("Score: ", score) print(doc.page_content) print("-" * 80) index_name = "vector" # default index name store = Neo4jVector.from_existing_index(
OpenAIEmbeddings()
langchain_openai.OpenAIEmbeddings
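A sketch of the completed call, reusing the connection variables and `query` defined earlier in the row; `index_name` points at the index created by `from_documents`:

```python
store = Neo4jVector.from_existing_index(
    OpenAIEmbeddings(),
    url=url,
    username=username,
    password=password,
    index_name=index_name,
)
docs_with_score = store.similarity_search_with_score(query, k=2)
for doc, score in docs_with_score:
    print(score, doc.page_content[:80])
```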
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scikit-learn') from langchain_community.retrievers import TFIDFRetriever retriever = TFIDFRetriever.from_texts(["foo", "bar", "world", "hello", "foo bar"]) from langchain_core.documents import Document retriever = TFIDFRetriever.from_documents( [
Document(page_content="foo")
langchain_core.documents.Document
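A quick sketch of retrieval with the completed TF-IDF retriever; ranking is by TF-IDF similarity, so the exact-match text scores highest:

```python
from langchain_community.retrievers import TFIDFRetriever

retriever = TFIDFRetriever.from_texts(["foo", "bar", "world", "hello", "foo bar"])
docs = retriever.get_relevant_documents("foo")
print(docs[0].page_content)  # "foo" ranks first
```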
get_ipython().run_line_magic('pip', 'install --upgrade --quiet hdbcli') import os from hdbcli import dbapi connection = dbapi.connect( address=os.environ.get("HANA_DB_ADDRESS"), port=os.environ.get("HANA_DB_PORT"), user=os.environ.get("HANA_DB_USER"), password=os.environ.get("HANA_DB_PASSWORD"), autocommit=True, sslValidateCertificate=False, ) from langchain.docstore.document import Document from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores.hanavector import HanaDB from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter text_documents =
TextLoader("../../modules/state_of_the_union.txt")
langchain_community.document_loaders.TextLoader
def pretty_print_docs(docs): print( f"\n{'-' * 100}\n".join( [f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)] ) ) from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter documents = TextLoader("../../state_of_the_union.txt").load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever() docs = retriever.get_relevant_documents( "What did the president say about Ketanji Brown Jackson" ) pretty_print_docs(docs) from langchain.retrievers import ContextualCompressionRetriever from langchain.retrievers.document_compressors import LLMChainExtractor from langchain_openai import OpenAI llm =
OpenAI(temperature=0)
langchain_openai.OpenAI
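A plausible continuation, assuming the row's `llm`, `retriever`, and `pretty_print_docs` are in scope; the compressor trims each retrieved document down to the query-relevant parts:

```python
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor

compressor = LLMChainExtractor.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(compressed_docs)
```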
meals = [ "Beef Enchiladas with Feta cheese. Mexican-Greek fusion", "Chicken Flatbreads with red sauce. Italian-Mexican fusion", "Veggie sweet potato quesadillas with vegan cheese", "One-Pan Tortelonni bake with peppers and onions", ] from langchain_openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-instruct") from langchain.prompts import PromptTemplate PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}". Embed the meal into the given text: "{text_to_personalize}". Prepend a personalized message including the user's name "{user}" and their preference "{preference}". Make it sound good. """ PROMPT = PromptTemplate( input_variables=["meal", "text_to_personalize", "user", "preference"], template=PROMPT_TEMPLATE, ) import langchain_experimental.rl_chain as rl_chain chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs \ believe you will love it!", ) print(response["response"]) for _ in range(5): try: response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) except Exception as e: print(e) print(response["response"]) print() scoring_criteria_template = ( "Given {preference} rank how good or bad this selection is {meal}" ) chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer( llm=llm, scoring_criteria_template_str=scoring_criteria_template ), ) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) print(response["response"]) selection_metadata = response["selection_metadata"] print( f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}" ) class CustomSelectionScorer(rl_chain.SelectionScorer): def score_response( self, inputs, llm_response: str, event: rl_chain.PickBestEvent ) -> float: print(event.based_on) print(event.to_select_from) selected_meal = event.to_select_from["meal"][event.selected.index] print(f"selected meal: {selected_meal}") if "Tom" in event.based_on["user"]: if "Vegetarian" in event.based_on["preference"]: if "Chicken" in selected_meal or "Beef" in selected_meal: return 0.0 else: return 1.0 else: if "Chicken" in selected_meal or "Beef" in selected_meal: return 1.0 else: return 0.0 else: raise NotImplementedError("I don't know how to score this user") chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), ) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) class CustomSelectionScorer(rl_chain.SelectionScorer): def score_preference(self, preference, selected_meal): if "Vegetarian" in preference: if "Chicken" in selected_meal or "Beef" in selected_meal: return 0.0 else: return 1.0 else: if "Chicken" in selected_meal or "Beef" in selected_meal: return 1.0 else: return 0.0 def score_response( self, 
inputs, llm_response: str, event: rl_chain.PickBestEvent ) -> float: selected_meal = event.to_select_from["meal"][event.selected.index] if "Tom" in event.based_on["user"]: return self.score_preference(event.based_on["preference"], selected_meal) elif "Anna" in event.based_on["user"]: return self.score_preference(event.based_on["preference"], selected_meal) else: raise NotImplementedError("I don't know how to score this user") chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), metrics_step=5, metrics_window_size=5, # rolling window average ) random_chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), metrics_step=5, metrics_window_size=5, # rolling window average policy=rl_chain.PickBestRandomPolicy, # set the random policy instead of default ) for _ in range(20): try: chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) random_chain.run( meal=rl_chain.ToSelectFrom(meals), user=
rl_chain.BasedOn("Tom")
langchain_experimental.rl_chain.BasedOn
import os from langchain.chains import ConversationalRetrievalChain from langchain_community.vectorstores import Vectara from langchain_openai import OpenAI from langchain_community.document_loaders import TextLoader loader = TextLoader("state_of_the_union.txt") documents = loader.load() vectara =
Vectara.from_documents(documents, embedding=None)
langchain_community.vectorstores.Vectara.from_documents
get_ipython().system('pip install langchain lark openai elasticsearch pandas') import pandas as pd details = ( pd.read_csv("~/Downloads/archive/Hotel_details.csv") .drop_duplicates(subset="hotelid") .set_index("hotelid") ) attributes = pd.read_csv( "~/Downloads/archive/Hotel_Room_attributes.csv", index_col="id" ) price = pd.read_csv("~/Downloads/archive/hotels_RoomPrice.csv", index_col="id") latest_price = price.drop_duplicates(subset="refid", keep="last")[ [ "hotelcode", "roomtype", "onsiterate", "roomamenities", "maxoccupancy", "mealinclusiontype", ] ] latest_price["ratedescription"] = attributes.loc[latest_price.index]["ratedescription"] latest_price = latest_price.join( details[["hotelname", "city", "country", "starrating"]], on="hotelcode" ) latest_price = latest_price.rename({"ratedescription": "roomdescription"}, axis=1) latest_price["mealsincluded"] = ~latest_price["mealinclusiontype"].isnull() latest_price.pop("hotelcode") latest_price.pop("mealinclusiontype") latest_price = latest_price.reset_index(drop=True) latest_price.head() from langchain_openai import ChatOpenAI model = ChatOpenAI(model="gpt-4") res = model.predict( "Below is a table with information about hotel rooms. " "Return a JSON list with an entry for each column. Each entry should have " '{"name": "column name", "description": "column description", "type": "column data type"}' f"\n\n{latest_price.head()}\n\nJSON:\n" ) import json attribute_info = json.loads(res) attribute_info latest_price.nunique()[latest_price.nunique() < 40] attribute_info[-2][ "description" ] += f". Valid values are {sorted(latest_price['starrating'].value_counts().index.tolist())}" attribute_info[3][ "description" ] += f". Valid values are {sorted(latest_price['maxoccupancy'].value_counts().index.tolist())}" attribute_info[-3][ "description" ] += f". Valid values are {sorted(latest_price['country'].value_counts().index.tolist())}" attribute_info from langchain.chains.query_constructor.base import ( get_query_constructor_prompt, load_query_constructor_runnable, ) doc_contents = "Detailed description of a hotel room" prompt = get_query_constructor_prompt(doc_contents, attribute_info) print(prompt.format(query="{query}")) chain = load_query_constructor_runnable( ChatOpenAI(model="gpt-3.5-turbo", temperature=0), doc_contents, attribute_info ) chain.invoke({"query": "I want a hotel in Southern Europe and my budget is 200 bucks."}) chain.invoke( { "query": "Find a 2-person room in Vienna or London, preferably with meals included and AC" } ) attribute_info[-3][ "description" ] += ". NOTE: Only use the 'eq' operator if a specific country is mentioned. If a region is mentioned, include all relevant countries in filter." chain = load_query_constructor_runnable( ChatOpenAI(model="gpt-3.5-turbo", temperature=0), doc_contents, attribute_info, ) chain.invoke({"query": "I want a hotel in Southern Europe and my budget is 200 bucks."}) content_attr = ["roomtype", "roomamenities", "roomdescription", "hotelname"] doc_contents = "A detailed description of a hotel room, including information about the room type and room amenities." filter_attribute_info = tuple( ai for ai in attribute_info if ai["name"] not in content_attr ) chain = load_query_constructor_runnable( ChatOpenAI(model="gpt-3.5-turbo", temperature=0), doc_contents, filter_attribute_info, ) chain.invoke( { "query": "Find a 2-person room in Vienna or London, preferably with meals included and AC" } ) examples = [ ( "I want a hotel in the Balkans with a king sized bed and a hot tub. 
Budget is $300 a night", { "query": "king-sized bed, hot tub", "filter": 'and(in("country", ["Bulgaria", "Greece", "Croatia", "Serbia"]), lte("onsiterate", 300))', }, ), ( "A room with breakfast included for 3 people, at a Hilton", { "query": "Hilton", "filter": 'and(eq("mealsincluded", true), gte("maxoccupancy", 3))', }, ), ] prompt = get_query_constructor_prompt( doc_contents, filter_attribute_info, examples=examples ) print(prompt.format(query="{query}")) chain = load_query_constructor_runnable( ChatOpenAI(model="gpt-3.5-turbo", temperature=0), doc_contents, filter_attribute_info, examples=examples, ) chain.invoke( { "query": "Find a 2-person room in Vienna or London, preferably with meals included and AC" } ) chain.invoke( { "query": "I want to stay somewhere highly rated along the coast. I want a room with a patio and a fireplace." } ) chain = load_query_constructor_runnable(
ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
langchain_openai.ChatOpenAI
get_ipython().run_cell_magic('writefile', 'whatsapp_chat.txt', "[8/15/23, 9:12:33 AM] Dr. Feather: \u200eMessages and calls are end-to-end encrypted. No one outside of this chat, not even WhatsApp, can read or listen to them.\n[8/15/23, 9:12:43 AM] Dr. Feather: I spotted a rare Hyacinth Macaw yesterday in the Amazon Rainforest. Such a magnificent creature!\n\u200e[8/15/23, 9:12:48 AM] Dr. Feather: \u200eimage omitted\n[8/15/23, 9:13:15 AM] Jungle Jane: That's stunning! Were you able to observe its behavior?\n\u200e[8/15/23, 9:13:23 AM] Dr. Feather: \u200eimage omitted\n[8/15/23, 9:14:02 AM] Dr. Feather: Yes, it seemed quite social with other macaws. They're known for their playful nature.\n[8/15/23, 9:14:15 AM] Jungle Jane: How's the research going on parrot communication?\n\u200e[8/15/23, 9:14:30 AM] Dr. Feather: \u200eimage omitted\n[8/15/23, 9:14:50 AM] Dr. Feather: It's progressing well. We're learning so much about how they use sound and color to communicate.\n[8/15/23, 9:15:10 AM] Jungle Jane: That's fascinating! Can't wait to read your paper on it.\n[8/15/23, 9:15:20 AM] Dr. Feather: Thank you! I'll send you a draft soon.\n[8/15/23, 9:25:16 PM] Jungle Jane: Looking forward to it! Keep up the great work.\n") from langchain_community.chat_loaders.whatsapp import WhatsAppChatLoader loader = WhatsAppChatLoader( path="./whatsapp_chat.txt", ) from typing import List from langchain_community.chat_loaders.base import ChatSession from langchain_community.chat_loaders.utils import ( map_ai_messages, merge_chat_runs, ) raw_messages = loader.lazy_load() merged_messages = merge_chat_runs(raw_messages) messages: List[ChatSession] = list(
map_ai_messages(merged_messages, sender="Dr. Feather")
langchain_community.chat_loaders.utils.map_ai_messages
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sagemaker') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results') import os os.environ["OPENAI_API_KEY"] = "<ADD-KEY-HERE>" os.environ["SERPAPI_API_KEY"] = "<ADD-KEY-HERE>" from langchain.agents import initialize_agent, load_tools from langchain.callbacks import SageMakerCallbackHandler from langchain.chains import LLMChain, SimpleSequentialChain from langchain.prompts import PromptTemplate from langchain_openai import OpenAI from sagemaker.analytics import ExperimentAnalytics from sagemaker.experiments.run import Run from sagemaker.session import Session HPARAMS = { "temperature": 0.1, "model_name": "gpt-3.5-turbo-instruct", } BUCKET_NAME = None EXPERIMENT_NAME = "langchain-sagemaker-tracker" session = Session(default_bucket=BUCKET_NAME) RUN_NAME = "run-scenario-1" PROMPT_TEMPLATE = "tell me a joke about {topic}" INPUT_VARIABLES = {"topic": "fish"} with Run( experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session ) as run: sagemaker_callback = SageMakerCallbackHandler(run) llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS) prompt = PromptTemplate.from_template(template=PROMPT_TEMPLATE) chain = LLMChain(llm=llm, prompt=prompt, callbacks=[sagemaker_callback]) chain.run(**INPUT_VARIABLES) sagemaker_callback.flush_tracker() RUN_NAME = "run-scenario-2" PROMPT_TEMPLATE_1 = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title. Title: {title} Playwright: This is a synopsis for the above play:""" PROMPT_TEMPLATE_2 = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play. Play Synopsis: {synopsis} Review from a New York Times play critic of the above play:""" INPUT_VARIABLES = { "input": "documentary about good video games that push the boundary of game design" } with Run( experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session ) as run: sagemaker_callback = SageMakerCallbackHandler(run) prompt_template1 = PromptTemplate.from_template(template=PROMPT_TEMPLATE_1) prompt_template2 = PromptTemplate.from_template(template=PROMPT_TEMPLATE_2) llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS) chain1 = LLMChain(llm=llm, prompt=prompt_template1, callbacks=[sagemaker_callback]) chain2 = LLMChain(llm=llm, prompt=prompt_template2, callbacks=[sagemaker_callback]) overall_chain = SimpleSequentialChain( chains=[chain1, chain2], callbacks=[sagemaker_callback] ) overall_chain.run(**INPUT_VARIABLES) sagemaker_callback.flush_tracker() RUN_NAME = "run-scenario-3" PROMPT_TEMPLATE = "Who is the oldest person alive? And what is their current age raised to the power of 1.51?" with Run( experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session ) as run: sagemaker_callback =
SageMakerCallbackHandler(run)
langchain.callbacks.SageMakerCallbackHandler
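A hedged continuation of the third scenario, assuming the row's `HPARAMS`, `PROMPT_TEMPLATE`, `OpenAI`, and callback imports are in scope; the agent type is illustrative:

```python
from langchain.agents import AgentType, initialize_agent, load_tools

llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS)
tools = load_tools(["serpapi", "llm-math"], llm=llm, callbacks=[sagemaker_callback])
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    callbacks=[sagemaker_callback],
)
agent.run(input=PROMPT_TEMPLATE)
# Flush collected metrics and artifacts to the SageMaker Experiments run.
sagemaker_callback.flush_tracker()
```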
get_ipython().run_line_magic('pip', 'install --upgrade --quiet weaviate-client') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") WEAVIATE_URL = getpass.getpass("WEAVIATE_URL:") os.environ["WEAVIATE_API_KEY"] = getpass.getpass("WEAVIATE_API_KEY:") WEAVIATE_API_KEY = os.environ["WEAVIATE_API_KEY"] from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import Weaviate from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() db =
Weaviate.from_documents(docs, embeddings, weaviate_url=WEAVIATE_URL, by_text=False)
langchain_community.vectorstores.Weaviate.from_documents
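A short continuation once `db` exists, mirroring the similarity searches used by the other vector-store rows here:

```python
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
```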
from langchain.chains import LLMMathChain from langchain_openai import OpenAI llm =
OpenAI(temperature=0)
langchain_openai.OpenAI
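A minimal sketch of the completed math chain; `from_llm` builds the chain around the zero-temperature model:

```python
from langchain.chains import LLMMathChain
from langchain_openai import OpenAI

llm = OpenAI(temperature=0)
llm_math = LLMMathChain.from_llm(llm, verbose=True)
# The chain translates the question into a numeric expression and evaluates it.
llm_math.run("What is 13 raised to the .3432 power?")
```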
get_ipython().system(' nomic login') get_ipython().system(' nomic login token') get_ipython().system(' pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain') import os os.environ["LANGCHAIN_TRACING_V2"] = "true" os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com" os.environ["LANGCHAIN_API_KEY"] = "api_key" from langchain_community.document_loaders import WebBaseLoader urls = [ "https://lilianweng.github.io/posts/2023-06-23-agent/", "https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/", "https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/", ] docs = [WebBaseLoader(url).load() for url in urls] docs_list = [item for sublist in docs for item in sublist] from langchain_text_splitters import CharacterTextSplitter text_splitter = CharacterTextSplitter.from_tiktoken_encoder( chunk_size=7500, chunk_overlap=100 ) doc_splits = text_splitter.split_documents(docs_list) import tiktoken encoding = tiktoken.get_encoding("cl100k_base") encoding = tiktoken.encoding_for_model("gpt-3.5-turbo") for d in doc_splits: print("The document is %s tokens" % len(encoding.encode(d.page_content))) import os from langchain_community.vectorstores import Chroma from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import RunnableLambda, RunnablePassthrough from langchain_nomic import NomicEmbeddings from langchain_nomic.embeddings import NomicEmbeddings vectorstore = Chroma.from_documents( documents=doc_splits, collection_name="rag-chroma", embedding=
NomicEmbeddings(model="nomic-embed-text-v1")
langchain_nomic.embeddings.NomicEmbeddings
from langchain_community.document_loaders import IFixitLoader loader =
IFixitLoader("https://www.ifixit.com/Teardown/Banana+Teardown/811")
langchain_community.document_loaders.IFixitLoader
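A quick sketch of loading the teardown once the call is completed; each returned Document is a step or section of the iFixit page:

```python
from langchain_community.document_loaders import IFixitLoader

loader = IFixitLoader("https://www.ifixit.com/Teardown/Banana+Teardown/811")
docs = loader.load()
print(len(docs), docs[0].page_content[:200])
```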
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"} get_ipython().run_line_magic('pip', 'install -upgrade --quiet langchain-google-datastore') PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') from google.colab import auth auth.authenticate_user() get_ipython().system('gcloud services enable datastore.googleapis.com') from langchain_core.documents import Document from langchain_google_datastore import DatastoreSaver data = [Document(page_content="Hello, World!")] saver = DatastoreSaver() saver.upsert_documents(data) saver = DatastoreSaver("Collection") saver.upsert_documents(data) doc_ids = ["AnotherCollection/doc_id", "foo/bar"] saver = DatastoreSaver() saver.upsert_documents(documents=data, document_ids=doc_ids) from langchain_google_datastore import DatastoreLoader loader_collection = DatastoreLoader("Collection") loader_subcollection =
DatastoreLoader("Collection/doc/SubCollection")
langchain_google_datastore.DatastoreLoader
import asyncio import os import nest_asyncio import pandas as pd from langchain.docstore.document import Document from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent from langchain_experimental.autonomous_agents import AutoGPT from langchain_openai import ChatOpenAI nest_asyncio.apply() llm = ChatOpenAI(model_name="gpt-4", temperature=1.0) import os from contextlib import contextmanager from typing import Optional from langchain.agents import tool from langchain_community.tools.file_management.read import ReadFileTool from langchain_community.tools.file_management.write import WriteFileTool ROOT_DIR = "./data/" @contextmanager def pushd(new_dir): """Context manager for changing the current working directory.""" prev_dir = os.getcwd() os.chdir(new_dir) try: yield finally: os.chdir(prev_dir) @tool def process_csv( csv_file_path: str, instructions: str, output_path: Optional[str] = None ) -> str: """Process a CSV by with pandas in a limited REPL.\ Only use this after writing data to disk as a csv file.\ Any figures must be saved to disk to be viewed by the human.\ Instructions should be written in natural language, not code. Assume the dataframe is already loaded.""" with pushd(ROOT_DIR): try: df = pd.read_csv(csv_file_path) except Exception as e: return f"Error: {e}" agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True) if output_path is not None: instructions += f" Save output to disk at {output_path}" try: result = agent.run(instructions) return result except Exception as e: return f"Error: {e}" async def async_load_playwright(url: str) -> str: """Load the specified URLs using Playwright and parse using BeautifulSoup.""" from bs4 import BeautifulSoup from playwright.async_api import async_playwright results = "" async with async_playwright() as p: browser = await p.chromium.launch(headless=True) try: page = await browser.new_page() await page.goto(url) page_source = await page.content() soup = BeautifulSoup(page_source, "html.parser") for script in soup(["script", "style"]): script.extract() text = soup.get_text() lines = (line.strip() for line in text.splitlines()) chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) results = "\n".join(chunk for chunk in chunks if chunk) except Exception as e: results = f"Error: {e}" await browser.close() return results def run_async(coro): event_loop = asyncio.get_event_loop() return event_loop.run_until_complete(coro) @tool def browse_web_page(url: str) -> str: """Verbose way to scrape a whole webpage. Likely to cause issues parsing.""" return run_async(async_load_playwright(url)) from langchain.chains.qa_with_sources.loading import ( BaseCombineDocumentsChain, load_qa_with_sources_chain, ) from langchain.tools import BaseTool, DuckDuckGoSearchRun from langchain_text_splitters import RecursiveCharacterTextSplitter from pydantic import Field def _get_text_splitter(): return RecursiveCharacterTextSplitter( chunk_size=500, chunk_overlap=20, length_function=len, ) class WebpageQATool(BaseTool): name = "query_webpage" description = ( "Browse a webpage and retrieve the information relevant to the question." 
) text_splitter: RecursiveCharacterTextSplitter = Field( default_factory=_get_text_splitter ) qa_chain: BaseCombineDocumentsChain def _run(self, url: str, question: str) -> str: """Useful for browsing websites and scraping the text information.""" result = browse_web_page.run(url) docs = [Document(page_content=result, metadata={"source": url})] web_docs = self.text_splitter.split_documents(docs) results = [] for i in range(0, len(web_docs), 4): input_docs = web_docs[i : i + 4] window_result = self.qa_chain( {"input_documents": input_docs, "question": question}, return_only_outputs=True, ) results.append(f"Response from window {i} - {window_result}") results_docs = [ Document(page_content="\n".join(results), metadata={"source": url}) ] return self.qa_chain( {"input_documents": results_docs, "question": question}, return_only_outputs=True, ) async def _arun(self, url: str, question: str) -> str: raise NotImplementedError query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm)) import faiss from langchain.docstore import InMemoryDocstore from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings embeddings_model =
OpenAIEmbeddings()
langchain_openai.OpenAIEmbeddings
from langchain.prompts.pipeline import PipelinePromptTemplate from langchain.prompts.prompt import PromptTemplate full_template = """{introduction} {example} {start}""" full_prompt = PromptTemplate.from_template(full_template) introduction_template = """You are impersonating {person}.""" introduction_prompt = PromptTemplate.from_template(introduction_template) example_template = """Here's an example of an interaction: Q: {example_q} A: {example_a}""" example_prompt =
PromptTemplate.from_template(example_template)
langchain.prompts.prompt.PromptTemplate.from_template
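A hedged sketch of where this row is headed, assuming its `full_prompt`, `introduction_prompt`, and `example_prompt` are in scope; the pipeline maps each sub-prompt onto a slot of the full template, and the filler values are illustrative:

```python
from langchain.prompts.pipeline import PipelinePromptTemplate
from langchain.prompts.prompt import PromptTemplate

start_prompt = PromptTemplate.from_template(
    """Now, do this for real!

Q: {input}
A:"""
)
pipeline_prompt = PipelinePromptTemplate(
    final_prompt=full_prompt,
    pipeline_prompts=[
        ("introduction", introduction_prompt),
        ("example", example_prompt),
        ("start", start_prompt),
    ],
)
print(
    pipeline_prompt.format(
        person="Elon Musk",
        example_q="What's your favorite car?",
        example_a="Tesla",
        input="What's your favorite social media site?",
    )
)
```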
from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain_community.chat_models import JinaChat from langchain_core.messages import HumanMessage, SystemMessage chat = JinaChat(temperature=0) messages = [ SystemMessage( content="You are a helpful assistant that translates English to French." ), HumanMessage( content="Translate this sentence from English to French. I love programming." ), ] chat(messages) template = ( "You are a helpful assistant that translates {input_language} to {output_language}." ) system_message_prompt =
SystemMessagePromptTemplate.from_template(template)
langchain.prompts.chat.SystemMessagePromptTemplate.from_template
get_ipython().run_line_magic('pip', 'install --upgrade --quiet elasticsearch==7.11.0') import getpass import os os.environ["QIANFAN_AK"] = getpass.getpass("Your Qianfan AK:") os.environ["QIANFAN_SK"] = getpass.getpass("Your Qianfan SK:") from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../../state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) from langchain_community.embeddings import QianfanEmbeddingsEndpoint embeddings =
QianfanEmbeddingsEndpoint()
langchain_community.embeddings.QianfanEmbeddingsEndpoint
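Once the endpoint is constructed, the standard Embeddings interface applies; a short continuation that reuses the row's split `docs`:

```python
embeddings = QianfanEmbeddingsEndpoint()
query_result = embeddings.embed_query("hello world")
print(len(query_result))  # embedding dimensionality
doc_results = embeddings.embed_documents([d.page_content for d in docs[:2]])
```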
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark') get_ipython().run_line_magic('pip', 'install --upgrade --quiet libdeeplake') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass("Activeloop token:") from langchain_community.vectorstores import DeepLake from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() docs = [ Document( page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}, ), Document( page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2}, ), Document( page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6}, ), Document( page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3}, ), Document( page_content="Toys come alive and have a blast doing so", metadata={"year": 1995, "genre": "animated"}, ), Document( page_content="Three men walk into the Zone, three men walk out of the Zone", metadata={ "year": 1979, "director": "Andrei Tarkovsky", "genre": "science fiction", "rating": 9.9, }, ), ] username_or_org = "<USERNAME_OR_ORG>" vectorstore = DeepLake.from_documents( docs, embeddings, dataset_path=f"hub://{username_or_org}/self_queery", overwrite=True, ) from langchain.chains.query_constructor.base import AttributeInfo from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain_openai import OpenAI metadata_field_info = [ AttributeInfo( name="genre", description="The genre of the movie", type="string or list[string]", ), AttributeInfo( name="year", description="The year the movie was released", type="integer", ), AttributeInfo( name="director", description="The name of the movie director", type="string", ), AttributeInfo( name="rating", description="A 1-10 rating for the movie", type="float" ), ] document_content_description = "Brief summary of a movie" llm =
OpenAI(temperature=0)
langchain_openai.OpenAI
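A minimal sketch of wiring the pieces above into a self-query retriever; the example queries are illustrative assumptions.
retriever = SelfQueryRetriever.from_llm(
    llm, vectorstore, document_content_description, metadata_field_info, verbose=True
)
# Purely semantic query
retriever.get_relevant_documents("What are some movies about dinosaurs")
# Query where the LLM should infer a metadata filter (rating > 8.5)
retriever.get_relevant_documents("I want to watch a movie rated higher than 8.5")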
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken langchain-openai python-dotenv datasets langchain deeplake beautifulsoup4 html2text ragas') ORG_ID = "..." import getpass import os from langchain.chains import RetrievalQA from langchain.vectorstores.deeplake import DeepLake from langchain_openai import OpenAIChat, OpenAIEmbeddings os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API token: ") os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass( "Enter your ActiveLoop API token: " ) # Get your API token from https://app.activeloop.ai, click on your profile picture in the top right corner, and select "API Tokens" token = os.getenv("ACTIVELOOP_TOKEN") openai_embeddings = OpenAIEmbeddings() db = DeepLake( dataset_path=f"hub://{ORG_ID}/deeplake-docs-deepmemory", # org_id stands for your username or organization from activeloop embedding=openai_embeddings, runtime={"tensor_db": True}, token=token, read_only=False, ) from urllib.parse import urljoin import requests from bs4 import BeautifulSoup def get_all_links(url): response = requests.get(url) if response.status_code != 200: print(f"Failed to retrieve the page: {url}") return [] soup = BeautifulSoup(response.content, "html.parser") links = [ urljoin(url, a["href"]) for a in soup.find_all("a", href=True) if a["href"] ] return links base_url = "https://docs.deeplake.ai/en/latest/" all_links = get_all_links(base_url) from langchain.document_loaders import AsyncHtmlLoader loader =
AsyncHtmlLoader(all_links)
langchain.document_loaders.AsyncHtmlLoader
import re from typing import Union from langchain.agents import ( AgentExecutor, AgentOutputParser, LLMSingleActionAgent, Tool, ) from langchain.chains import LLMChain from langchain.prompts import StringPromptTemplate from langchain_community.utilities import SerpAPIWrapper from langchain_core.agents import AgentAction, AgentFinish from langchain_openai import OpenAI search = SerpAPIWrapper() search_tool = Tool( name="Search", func=search.run, description="useful for when you need to answer questions about current events", ) def fake_func(inp: str) -> str: return "foo" fake_tools = [ Tool( name=f"foo-{i}", func=fake_func, description=f"a silly function that you can use to get more information about the number {i}", ) for i in range(99) ] ALL_TOOLS = [search_tool] + fake_tools from langchain_community.vectorstores import FAISS from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings docs = [ Document(page_content=t.description, metadata={"index": i}) for i, t in enumerate(ALL_TOOLS) ] vector_store = FAISS.from_documents(docs, OpenAIEmbeddings()) retriever = vector_store.as_retriever() def get_tools(query): docs = retriever.get_relevant_documents(query) return [ALL_TOOLS[d.metadata["index"]] for d in docs] get_tools("whats the weather?") get_tools("whats the number 13?") template = """Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools: {tools} Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s Question: {input} {agent_scratchpad}""" from typing import Callable class CustomPromptTemplate(StringPromptTemplate): template: str tools_getter: Callable def format(self, **kwargs) -> str: intermediate_steps = kwargs.pop("intermediate_steps") thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\nObservation: {observation}\nThought: " kwargs["agent_scratchpad"] = thoughts tools = self.tools_getter(kwargs["input"]) kwargs["tools"] = "\n".join( [f"{tool.name}: {tool.description}" for tool in tools] ) kwargs["tool_names"] = ", ".join([tool.name for tool in tools]) return self.template.format(**kwargs) prompt = CustomPromptTemplate( template=template, tools_getter=get_tools, input_variables=["input", "intermediate_steps"], ) class CustomOutputParser(AgentOutputParser): def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]: if "Final Answer:" in llm_output: return AgentFinish( return_values={"output": llm_output.split("Final Answer:")[-1].strip()}, log=llm_output, ) regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)" match = re.search(regex, llm_output, re.DOTALL) if not match: raise ValueError(f"Could not parse LLM output: `{llm_output}`") action = match.group(1).strip() action_input = match.group(2) return AgentAction( tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output ) output_parser = CustomOutputParser() llm =
OpenAI(temperature=0)
langchain_openai.OpenAI
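A sketch of assembling the agent from the parts above (custom prompt, output parser, tool retrieval); the stop sequence and sample question are assumptions.
llm_chain = LLMChain(llm=llm, prompt=prompt)
tools = get_tools("whats the weather?")
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
    llm_chain=llm_chain,
    output_parser=output_parser,
    stop=["\nObservation:"],
    allowed_tools=tool_names,
)
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True
)
agent_executor.run("What's the weather in SF?")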
from langchain.agents import AgentType, initialize_agent from langchain.chains import LLMMathChain from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.tools import Tool from langchain_openai import ChatOpenAI get_ipython().run_line_magic('pip', 'install --upgrade --quiet numexpr') llm =
ChatOpenAI(temperature=0, model="gpt-4")
langchain_openai.ChatOpenAI
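A minimal sketch, using only the imports above, of exposing LLMMathChain as a tool to a zero-shot agent; the CalculatorInput schema and the sample question are illustrative assumptions.
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)

class CalculatorInput(BaseModel):
    question: str = Field()

tools = [
    Tool.from_function(
        func=llm_math_chain.run,
        name="Calculator",
        description="useful for when you need to answer questions about math",
        args_schema=CalculatorInput,
    )
]
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to the 0.43 power?")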
get_ipython().run_line_magic('pip', "install --upgrade --quiet faiss-gpu # For CUDA 7.5+ Supported GPU's.") get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu # For CPU Installation') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../../extras/modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() db = await FAISS.afrom_documents(docs, embeddings) query = "What did the president say about Ketanji Brown Jackson" docs = await db.asimilarity_search(query) print(docs[0].page_content) docs_and_scores = await db.asimilarity_search_with_score(query) docs_and_scores[0] embedding_vector = await embeddings.aembed_query(query) docs_and_scores = await db.asimilarity_search_by_vector(embedding_vector) db.save_local("faiss_index") new_db = FAISS.load_local("faiss_index", embeddings, asynchronous=True) docs = await new_db.asimilarity_search(query) docs[0] from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings pkl = db.serialize_to_bytes() # serializes the faiss index embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") db = FAISS.deserialize_from_bytes( embeddings=embeddings, serialized=pkl, asynchronous=True ) # Load the index db1 = await FAISS.afrom_texts(["foo"], embeddings) db2 = await FAISS.afrom_texts(["bar"], embeddings) db1.docstore._dict db2.docstore._dict db1.merge_from(db2) db1.docstore._dict from langchain_core.documents import Document list_of_documents = [ Document(page_content="foo", metadata=dict(page=1)), Document(page_content="bar", metadata=dict(page=1)), Document(page_content="foo", metadata=dict(page=2)), Document(page_content="barbar", metadata=dict(page=2)), Document(page_content="foo", metadata=dict(page=3)), Document(page_content="bar burr", metadata=dict(page=3)), Document(page_content="foo", metadata=dict(page=4)), Document(page_content="bar bruh", metadata=dict(page=4)), ] db =
FAISS.from_documents(list_of_documents, embeddings)
langchain_community.vectorstores.FAISS.from_documents
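With the metadata-tagged documents above indexed, a filtered similarity search over the FAISS store might look like this:
results_with_scores = db.similarity_search_with_score("foo", filter=dict(page=1))
for doc, score in results_with_scores:
    # Only documents whose metadata matches page == 1 are returned
    print(f"Content: {doc.page_content}, Metadata: {doc.metadata}, Score: {score}")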
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymysql') from langchain.chains import RetrievalQA from langchain_community.document_loaders import ( DirectoryLoader, UnstructuredMarkdownLoader, ) from langchain_community.vectorstores import StarRocks from langchain_community.vectorstores.starrocks import StarRocksSettings from langchain_openai import OpenAI, OpenAIEmbeddings from langchain_text_splitters import TokenTextSplitter update_vectordb = False loader = DirectoryLoader( "./docs", glob="**/*.md", loader_cls=UnstructuredMarkdownLoader ) documents = loader.load() text_splitter = TokenTextSplitter(chunk_size=400, chunk_overlap=50) split_docs = text_splitter.split_documents(documents) update_vectordb = True split_docs[-20] print("# docs = %d, # splits = %d" % (len(documents), len(split_docs))) def gen_starrocks(update_vectordb, embeddings, settings): if update_vectordb: docsearch = StarRocks.from_documents(split_docs, embeddings, config=settings) else: docsearch = StarRocks(embeddings, settings) return docsearch embeddings =
OpenAIEmbeddings()
langchain_openai.OpenAIEmbeddings
import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() db = FAISS.from_documents(docs, embeddings) query = "What did the president say about Ketanji Brown Jackson" docs = db.similarity_search(query) print(docs[0].page_content) retriever = db.as_retriever() docs = retriever.invoke(query) print(docs[0].page_content) docs_and_scores = db.similarity_search_with_score(query) docs_and_scores[0] embedding_vector = embeddings.embed_query(query) docs_and_scores = db.similarity_search_by_vector(embedding_vector) db.save_local("faiss_index") new_db = FAISS.load_local("faiss_index", embeddings) docs = new_db.similarity_search(query) docs[0] from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings pkl = db.serialize_to_bytes() # serializes the faiss embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") db = FAISS.deserialize_from_bytes( embeddings=embeddings, serialized=pkl ) # Load the index db1 =
FAISS.from_texts(["foo"], embeddings)
langchain_community.vectorstores.FAISS.from_texts
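A small follow-on sketch, assuming db1 above: a second index can be built the same way and merged in place.
db2 = FAISS.from_texts(["bar"], embeddings)
db1.merge_from(db2)
# The docstore now holds entries from both indexes
print(db1.docstore._dict)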
get_ipython().run_line_magic('pip', "install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken") from langchain_community.vectorstores import DeepLake from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") activeloop_token = getpass.getpass("activeloop token:") embeddings = OpenAIEmbeddings() from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, overwrite=True) db.add_documents(docs) query = "What did the president say about Ketanji Brown Jackson" docs = db.similarity_search(query) print(docs[0].page_content) db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, read_only=True) docs = db.similarity_search(query) from langchain.chains import RetrievalQA from langchain_openai import OpenAIChat qa = RetrievalQA.from_chain_type( llm=OpenAIChat(model="gpt-3.5-turbo"), chain_type="stuff", retriever=db.as_retriever(), ) query = "What did the president say about Ketanji Brown Jackson" qa.run(query) import random for d in docs: d.metadata["year"] = random.randint(2012, 2014) db = DeepLake.from_documents( docs, embeddings, dataset_path="./my_deeplake/", overwrite=True ) db.similarity_search( "What did the president say about Ketanji Brown Jackson", filter={"metadata": {"year": 2013}}, ) db.similarity_search( "What did the president say about Ketanji Brown Jackson?", distance_metric="cos" ) db.max_marginal_relevance_search( "What did the president say about Ketanji Brown Jackson?" ) db.delete_dataset() DeepLake.force_delete_by_path("./my_deeplake") os.environ["ACTIVELOOP_TOKEN"] = activeloop_token username = "<USERNAME_OR_ORG>" # your username on app.activeloop.ai dataset_path = f"hub://{username}/langchain_testing_python" # could be also ./local/path (much faster locally), s3://bucket/path/to/dataset, gcs://path/to/dataset, etc. docs = text_splitter.split_documents(documents) embedding = OpenAIEmbeddings() db =
DeepLake(dataset_path=dataset_path, embedding=embeddings, overwrite=True)
langchain_community.vectorstores.DeepLake
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain.evaluation import load_evaluator from langchain_openai import ChatOpenAI evaluator = load_evaluator("labeled_score_string", llm=ChatOpenAI(model="gpt-4")) eval_result = evaluator.evaluate_strings( prediction="You can find them in the dresser's third drawer.", reference="The socks are in the third drawer in the dresser", input="Where are my socks?", ) print(eval_result) accuracy_criteria = { "accuracy": """ Score 1: The answer is completely unrelated to the reference. Score 3: The answer has minor relevance but does not align with the reference. Score 5: The answer has moderate relevance but contains inaccuracies. Score 7: The answer aligns with the reference but has minor errors or omissions. Score 10: The answer is completely accurate and aligns perfectly with the reference.""" } evaluator = load_evaluator( "labeled_score_string", criteria=accuracy_criteria, llm=ChatOpenAI(model="gpt-4"), ) eval_result = evaluator.evaluate_strings( prediction="You can find them in the dresser's third drawer.", reference="The socks are in the third drawer in the dresser", input="Where are my socks?", ) print(eval_result) eval_result = evaluator.evaluate_strings( prediction="You can find them in the dresser.", reference="The socks are in the third drawer in the dresser", input="Where are my socks?", ) print(eval_result) eval_result = evaluator.evaluate_strings( prediction="You can find them in the dog's bed.", reference="The socks are in the third drawer in the dresser", input="Where are my socks?", ) print(eval_result) evaluator = load_evaluator( "labeled_score_string", criteria=accuracy_criteria, llm=
ChatOpenAI(model="gpt-4")
langchain_openai.ChatOpenAI
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet') import os from langchain_community.document_loaders import DocugamiLoader DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY") docset_id = "26xpy3aes7xp" document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"] loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids) chunks = loader.load() len(chunks) loader.min_text_length = 64 loader.include_xml_tags = True chunks = loader.load() for chunk in chunks[:5]: print(chunk) get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib') loader = DocugamiLoader(docset_id="zo954yqy53wp") chunks = loader.load() for chunk in chunks: stripped_metadata = chunk.metadata.copy() for key in chunk.metadata: if key not in ["name", "xpath", "id", "structure"]: del stripped_metadata[key] chunk.metadata = stripped_metadata print(len(chunks)) from langchain.chains import RetrievalQA from langchain_community.vectorstores.chroma import Chroma from langchain_openai import OpenAI, OpenAIEmbeddings embedding = OpenAIEmbeddings() vectordb = Chroma.from_documents(documents=chunks, embedding=embedding) retriever = vectordb.as_retriever() qa_chain = RetrievalQA.from_chain_type( llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True ) qa_chain("What can tenants do with signage on their properties?") chain_response = qa_chain("What is rentable area for the property owned by DHA Group?") chain_response["result"] # correct answer should be 13,500 sq ft chain_response["source_documents"] loader = DocugamiLoader(docset_id="zo954yqy53wp") loader.include_xml_tags = ( True # for additional semantics from the Docugami knowledge graph ) chunks = loader.load() print(chunks[0].metadata) get_ipython().system('poetry run pip install --upgrade lark --quiet') from langchain.chains.query_constructor.schema import AttributeInfo from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain_community.vectorstores.chroma import Chroma EXCLUDE_KEYS = ["id", "xpath", "structure"] metadata_field_info = [ AttributeInfo( name=key, description=f"The {key} for this chunk", type="string", ) for key in chunks[0].metadata if key.lower() not in EXCLUDE_KEYS ] document_content_description = "Contents of this chunk" llm = OpenAI(temperature=0) vectordb = Chroma.from_documents(documents=chunks, embedding=embedding) retriever = SelfQueryRetriever.from_llm( llm, vectordb, document_content_description, metadata_field_info, verbose=True ) qa_chain = RetrievalQA.from_chain_type( llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True, verbose=True, ) qa_chain( "What is rentable area for the property owned by DHA Group?" 
) # correct answer should be 13,500 sq ft from typing import Dict, List from langchain_community.document_loaders import DocugamiLoader from langchain_core.documents import Document loader = DocugamiLoader(docset_id="zo954yqy53wp") loader.include_xml_tags = ( True # for additional semantics from the Docugami knowledge graph ) loader.parent_hierarchy_levels = 3 # for expanded context loader.max_text_length = ( 1024 * 8 ) # 8K chars are roughly 2K tokens (ref: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them) loader.include_project_metadata_in_doc_metadata = ( False # Not filtering on vector metadata, so remove to lighten the vectors ) chunks: List[Document] = loader.load() parents_by_id: Dict[str, Document] = {} children_by_id: Dict[str, Document] = {} for chunk in chunks: chunk_id = chunk.metadata.get("id") parent_chunk_id = chunk.metadata.get(loader.parent_id_key) if not parent_chunk_id: parents_by_id[chunk_id] = chunk else: children_by_id[chunk_id] = chunk for id, chunk in list(children_by_id.items())[:5]: parent_chunk_id = chunk.metadata.get(loader.parent_id_key) if parent_chunk_id: print(f"PARENT CHUNK {parent_chunk_id}: {parents_by_id[parent_chunk_id]}") print(f"CHUNK {id}: {chunk}") from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType from langchain.storage import InMemoryStore from langchain_community.vectorstores.chroma import Chroma from langchain_openai import OpenAIEmbeddings vectorstore = Chroma(collection_name="big2small", embedding_function=
OpenAIEmbeddings()
langchain_openai.OpenAIEmbeddings
import re from typing import Union from langchain.agents import ( AgentExecutor, AgentOutputParser, LLMSingleActionAgent, ) from langchain.chains import LLMChain from langchain.prompts import StringPromptTemplate from langchain_community.agent_toolkits import NLAToolkit from langchain_community.tools.plugin import AIPlugin from langchain_core.agents import AgentAction, AgentFinish from langchain_openai import OpenAI llm = OpenAI(temperature=0) urls = [ "https://datasette.io/.well-known/ai-plugin.json", "https://api.speak.com/.well-known/ai-plugin.json", "https://www.wolframalpha.com/.well-known/ai-plugin.json", "https://www.zapier.com/.well-known/ai-plugin.json", "https://www.klarna.com/.well-known/ai-plugin.json", "https://www.joinmilo.com/.well-known/ai-plugin.json", "https://slack.com/.well-known/ai-plugin.json", "https://schooldigger.com/.well-known/ai-plugin.json", ] AI_PLUGINS = [AIPlugin.from_url(url) for url in urls] from langchain_community.vectorstores import FAISS from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() docs = [ Document( page_content=plugin.description_for_model, metadata={"plugin_name": plugin.name_for_model}, ) for plugin in AI_PLUGINS ] vector_store =
FAISS.from_documents(docs, embeddings)
langchain_community.vectorstores.FAISS.from_documents
get_ipython().run_line_magic('pip', 'install --upgrade --quiet rank_bm25 > /dev/null') from langchain.retrievers import BM25Retriever, EnsembleRetriever from langchain_community.vectorstores import FAISS from langchain_openai import OpenAIEmbeddings doc_list_1 = [ "I like apples", "I like oranges", "Apples and oranges are fruits", ] bm25_retriever = BM25Retriever.from_texts( doc_list_1, metadatas=[{"source": 1}] * len(doc_list_1) ) bm25_retriever.k = 2 doc_list_2 = [ "You like apples", "You like oranges", ] embedding =
OpenAIEmbeddings()
langchain_openai.OpenAIEmbeddings
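A minimal sketch completing the ensemble, assuming the lists and embedding above: a FAISS retriever over the second list is combined with the BM25 retriever, and results are fused by weighted reciprocal rank.
faiss_vectorstore = FAISS.from_texts(
    doc_list_2, embedding, metadatas=[{"source": 2}] * len(doc_list_2)
)
faiss_retriever = faiss_vectorstore.as_retriever(search_kwargs={"k": 2})
ensemble_retriever = EnsembleRetriever(
    retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]
)
docs = ensemble_retriever.get_relevant_documents("apples")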
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml langchainhub') get_ipython().system(' brew install tesseract') get_ipython().system(' brew install poppler') path = "/Users/rlm/Desktop/Papers/LLaMA2/" from typing import Any from pydantic import BaseModel from unstructured.partition.pdf import partition_pdf raw_pdf_elements = partition_pdf( filename=path + "LLaMA2.pdf", extract_images_in_pdf=False, infer_table_structure=True, chunking_strategy="by_title", max_characters=4000, new_after_n_chars=3800, combine_text_under_n_chars=2000, image_output_dir_path=path, ) category_counts = {} for element in raw_pdf_elements: category = str(type(element)) if category in category_counts: category_counts[category] += 1 else: category_counts[category] = 1 unique_categories = set(category_counts.keys()) category_counts class Element(BaseModel): type: str text: Any categorized_elements = [] for element in raw_pdf_elements: if "unstructured.documents.elements.Table" in str(type(element)): categorized_elements.append(Element(type="table", text=str(element))) elif "unstructured.documents.elements.CompositeElement" in str(type(element)): categorized_elements.append(Element(type="text", text=str(element))) table_elements = [e for e in categorized_elements if e.type == "table"] print(len(table_elements)) text_elements = [e for e in categorized_elements if e.type == "text"] print(len(text_elements)) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI prompt_text = """You are an assistant tasked with summarizing tables and text. \ Give a concise summary of the table or text. Table or text chunk: {element} """ prompt = ChatPromptTemplate.from_template(prompt_text) model = ChatOpenAI(temperature=0, model="gpt-4") summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() tables = [i.text for i in table_elements] table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5}) texts = [i.text for i in text_elements] text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5}) import uuid from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings()) store = InMemoryStore() id_key = "doc_id" retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key, ) doc_ids = [str(uuid.uuid4()) for _ in texts] summary_texts = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(text_summaries) ] retriever.vectorstore.add_documents(summary_texts) retriever.docstore.mset(list(zip(doc_ids, texts))) table_ids = [str(uuid.uuid4()) for _ in tables] summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
langchain_core.documents.Document
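Continuing the same pattern for tables (a sketch, assuming the list comprehension above is closed with for i, s in enumerate(table_summaries)): the summaries go into the vector store and the raw tables into the docstore under matching ids.
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
# The retriever searches over summaries but returns the raw text/table chunks
retriever.get_relevant_documents("How many training tokens were used for LLaMA2?")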
get_ipython().run_line_magic('pip', 'install --upgrade --quiet azureml-mlflow') get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas') get_ipython().run_line_magic('pip', 'install --upgrade --quiet textstat') get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results') get_ipython().system('python -m spacy download en_core_web_sm') import os os.environ["MLFLOW_TRACKING_URI"] = "" os.environ["OPENAI_API_KEY"] = "" os.environ["SERPAPI_API_KEY"] = "" from langchain.callbacks import MlflowCallbackHandler from langchain_openai import OpenAI """Main function. This function is used to try the callback handler. Scenarios: 1. OpenAI LLM 2. Chain with multiple SubChains on multiple generations 3. Agent with Tools """ mlflow_callback = MlflowCallbackHandler() llm = OpenAI( model_name="gpt-3.5-turbo", temperature=0, callbacks=[mlflow_callback], verbose=True ) llm_result = llm.generate(["Tell me a joke"]) mlflow_callback.flush_tracker(llm) from langchain.chains import LLMChain from langchain.prompts import PromptTemplate template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title. Title: {title} Playwright: This is a synopsis for the above play:""" prompt_template =
PromptTemplate(input_variables=["title"], template=template)
langchain.prompts.PromptTemplate
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnablePassthrough from langchain_openai import ChatOpenAI template = """Answer the users question based only on the following context: <context> {context} </context> Question: {question} """ prompt = ChatPromptTemplate.from_template(template) model = ChatOpenAI(temperature=0) search = DuckDuckGoSearchAPIWrapper() def retriever(query): return search.run(query) chain = ( {"context": retriever, "question": RunnablePassthrough()} | prompt | model | StrOutputParser() ) simple_query = "what is langchain?" chain.invoke(simple_query) distracted_query = "man that sam bankman fried trial was crazy! what is langchain?" chain.invoke(distracted_query) retriever(distracted_query) template = """Provide a better search query for \ web search engine to answer the given question, end \ the queries with ’**’. Question: \ {x} Answer:""" rewrite_prompt =
ChatPromptTemplate.from_template(template)
langchain_core.prompts.ChatPromptTemplate.from_template
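A sketch of the rest of the rewrite-retrieve-read pattern, assuming rewrite_prompt above; the _parse helper (stripping the '**' markers the prompt asks for) is an illustrative assumption.
def _parse(text):
    # Remove the quotes and the '**' terminator requested by the rewrite prompt
    return text.strip('"').strip("**")

rewriter = rewrite_prompt | ChatOpenAI(temperature=0) | StrOutputParser() | _parse

rewrite_retrieve_read_chain = (
    {
        "context": {"x": RunnablePassthrough()} | rewriter | retriever,
        "question": RunnablePassthrough(),
    }
    | prompt
    | model
    | StrOutputParser()
)
rewrite_retrieve_read_chain.invoke(distracted_query)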
get_ipython().run_line_magic('pip', 'install --upgrade --quiet predictionguard langchain') import os from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import PredictionGuard os.environ["OPENAI_API_KEY"] = "<your OpenAI api key>" os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>" pgllm = PredictionGuard(model="OpenAI-text-davinci-003") pgllm("Tell me a joke") template = """Respond to the following query based on the context. Context: EVERY comment, DM + email suggestion has led us to this EXCITING announcement! 🎉 We have officially added TWO new candle subscription box options! 📦 Exclusive Candle Box - $80 Monthly Candle Box - $45 (NEW!) Scent of The Month Box - $28 (NEW!) Head to stories to get ALLL the deets on each box! 👆 BONUS: Save 50% on your first box with code 50OFF! 🎉 Query: {query} Result: """ prompt = PromptTemplate.from_template(template) pgllm(prompt.format(query="What kind of post is this?")) pgllm = PredictionGuard( model="OpenAI-text-davinci-003", output={ "type": "categorical", "categories": ["product announcement", "apology", "relational"], }, ) pgllm(prompt.format(query="What kind of post is this?")) pgllm = PredictionGuard(model="OpenAI-text-davinci-003") template = """Question: {question} Answer: Let's think step by step.""" prompt = PromptTemplate.from_template(template) llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True) question = "What NFL team won the Super Bowl in the year Justin Bieber was born?" llm_chain.predict(question=question) template = """Write a {adjective} poem about {subject}.""" prompt = PromptTemplate.from_template(template) llm_chain =
LLMChain(prompt=prompt, llm=pgllm, verbose=True)
langchain.chains.LLMChain
from langchain.indexes import SQLRecordManager, index from langchain_core.documents import Document from langchain_elasticsearch import ElasticsearchStore from langchain_openai import OpenAIEmbeddings collection_name = "test_index" embedding = OpenAIEmbeddings() vectorstore = ElasticsearchStore( es_url="http://localhost:9200", index_name="test_index", embedding=embedding ) namespace = f"elasticsearch/{collection_name}" record_manager = SQLRecordManager( namespace, db_url="sqlite:///record_manager_cache.sql" ) record_manager.create_schema() doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"}) doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"}) def _clear(): """Hacky helper method to clear content. See the `full` mode section to understand why it works.""" index([], record_manager, vectorstore, cleanup="full", source_id_key="source") _clear() index( [doc1, doc1, doc1, doc1, doc1], record_manager, vectorstore, cleanup=None, source_id_key="source", ) _clear() index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source") index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source") _clear() index( [doc1, doc2], record_manager, vectorstore, cleanup="incremental", source_id_key="source", ) index( [doc1, doc2], record_manager, vectorstore, cleanup="incremental", source_id_key="source", ) index([], record_manager, vectorstore, cleanup="incremental", source_id_key="source") changed_doc_2 = Document(page_content="puppy", metadata={"source": "doggy.txt"}) index( [changed_doc_2], record_manager, vectorstore, cleanup="incremental", source_id_key="source", ) _clear() all_docs = [doc1, doc2] index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source") del all_docs[0] all_docs
index(all_docs, record_manager, vectorstore, cleanup="full", source_id_key="source")
langchain.indexes.index
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml') path = "/Users/rlm/Desktop/Papers/LLaVA/" from typing import Any from pydantic import BaseModel from unstructured.partition.pdf import partition_pdf raw_pdf_elements = partition_pdf( filename=path + "LLaVA.pdf", extract_images_in_pdf=True, infer_table_structure=True, chunking_strategy="by_title", max_characters=4000, new_after_n_chars=3800, combine_text_under_n_chars=2000, image_output_dir_path=path, ) category_counts = {} for element in raw_pdf_elements: category = str(type(element)) if category in category_counts: category_counts[category] += 1 else: category_counts[category] = 1 unique_categories = set(category_counts.keys()) category_counts class Element(BaseModel): type: str text: Any categorized_elements = [] for element in raw_pdf_elements: if "unstructured.documents.elements.Table" in str(type(element)): categorized_elements.append(Element(type="table", text=str(element))) elif "unstructured.documents.elements.CompositeElement" in str(type(element)): categorized_elements.append(Element(type="text", text=str(element))) table_elements = [e for e in categorized_elements if e.type == "table"] print(len(table_elements)) text_elements = [e for e in categorized_elements if e.type == "text"] print(len(text_elements)) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI prompt_text = """You are an assistant tasked with summarizing tables and text. \ Give a concise summary of the table or text. Table or text chunk: {element} """ prompt = ChatPromptTemplate.from_template(prompt_text) model = ChatOpenAI(temperature=0, model="gpt-4") summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() texts = [i.text for i in text_elements] text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5}) tables = [i.text for i in table_elements] table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5}) get_ipython().run_cell_magic('bash', '', '\n# Define the directory containing the images\nIMG_DIR=~/Desktop/Papers/LLaVA/\n\n# Loop through each image in the directory\nfor img in "${IMG_DIR}"*.jpg; do\n # Extract the base name of the image without extension\n base_name=$(basename "$img" .jpg)\n\n # Define the output file name based on the image name\n output_file="${IMG_DIR}${base_name}.txt"\n\n # Execute the command and save the output to the defined output file\n /Users/rlm/Desktop/Code/llama.cpp/bin/llava -m ../models/llava-7b/ggml-model-q5_k.gguf --mmproj ../models/llava-7b/mmproj-model-f16.gguf --temp 0.1 -p "Describe the image in detail. Be specific about graphs, such as bar plots." 
--image "$img" > "$output_file"\n\ndone\n') import glob import os file_paths = glob.glob(os.path.expanduser(os.path.join(path, "*.txt"))) img_summaries = [] for file_path in file_paths: with open(file_path, "r") as file: img_summaries.append(file.read()) logging_header = "clip_model_load: total allocated memory: 201.27 MB\n\n" cleaned_img_summary = [s.split(logging_header, 1)[1].strip() for s in img_summaries] import uuid from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings()) store = InMemoryStore() id_key = "doc_id" retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key, ) doc_ids = [str(uuid.uuid4()) for _ in texts] summary_texts = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(text_summaries) ] retriever.vectorstore.add_documents(summary_texts) retriever.docstore.mset(list(zip(doc_ids, texts))) table_ids = [str(uuid.uuid4()) for _ in tables] summary_tables = [ Document(page_content=s, metadata={id_key: table_ids[i]}) for i, s in enumerate(table_summaries) ] retriever.vectorstore.add_documents(summary_tables) retriever.docstore.mset(list(zip(table_ids, tables))) img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary] summary_img = [ Document(page_content=s, metadata={id_key: img_ids[i]}) for i, s in enumerate(cleaned_img_summary) ] retriever.vectorstore.add_documents(summary_img) retriever.docstore.mset(list(zip(img_ids, cleaned_img_summary))) img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary] summary_img = [ Document(page_content=s, metadata={id_key: img_ids[i]}) for i, s in enumerate(cleaned_img_summary) ] retriever.vectorstore.add_documents(summary_img) retriever.docstore.mset( list( zip( img_ids, ) ) ) tables[2] table_summaries[2] retriever.get_relevant_documents( "What are results for LLaMA across across domains / subjects?" )[1] retriever.get_relevant_documents("Images / figures with playful and creative examples")[ 1 ] from langchain_core.runnables import RunnablePassthrough template = """Answer the question based only on the following context, which can include text and tables: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) model = ChatOpenAI(temperature=0, model="gpt-4") chain = ( {"context": retriever, "question":
RunnablePassthrough()
langchain_core.runnables.RunnablePassthrough
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pdfminer') from langchain_community.document_loaders.image import UnstructuredImageLoader loader = UnstructuredImageLoader("layout-parser-paper-fast.jpg") data = loader.load() data[0] loader =
UnstructuredImageLoader("layout-parser-paper-fast.jpg", mode="elements")
langchain_community.document_loaders.image.UnstructuredImageLoader
get_ipython().run_line_magic('pip', 'install --upgrade --quiet playwright > /dev/null') get_ipython().run_line_magic('pip', 'install --upgrade --quiet lxml') from langchain_community.agent_toolkits import PlayWrightBrowserToolkit from langchain_community.tools.playwright.utils import ( create_async_playwright_browser, # A synchronous browser is available, though it isn't compatible with jupyter ) import nest_asyncio nest_asyncio.apply() async_browser = create_async_playwright_browser() toolkit =
PlayWrightBrowserToolkit.from_browser(async_browser=async_browser)
langchain_community.agent_toolkits.PlayWrightBrowserToolkit.from_browser
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain_core.runnables import RunnableParallel, RunnablePassthrough runnable = RunnableParallel( passed=RunnablePassthrough(), extra=RunnablePassthrough.assign(mult=lambda x: x["num"] * 3), modified=lambda x: x["num"] + 1, ) runnable.invoke({"num": 1}) from langchain_community.vectorstores import FAISS from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnablePassthrough from langchain_openai import ChatOpenAI, OpenAIEmbeddings vectorstore = FAISS.from_texts( ["harrison worked at kensho"], embedding=OpenAIEmbeddings() ) retriever = vectorstore.as_retriever() template = """Answer the question based only on the following context: {context} Question: {question} """ prompt = ChatPromptTemplate.from_template(template) model = ChatOpenAI() retrieval_chain = ( {"context": retriever, "question":
RunnablePassthrough()
langchain_core.runnables.RunnablePassthrough
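For reference, a sketch of the completed chain and an invocation (the question is an assumption):
retrieval_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)
retrieval_chain.invoke("where did harrison work?")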
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scikit-learn') get_ipython().run_line_magic('pip', 'install --upgrade --quiet bson') get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas pyarrow') import os from getpass import getpass os.environ["OPENAI_API_KEY"] = getpass("Enter your OpenAI key:") from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import SKLearnVectorStore from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter loader =
TextLoader("../../modules/state_of_the_union.txt")
langchain_community.document_loaders.TextLoader
get_ipython().system('pip3 install tcvectordb') from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.fake import FakeEmbeddings from langchain_community.vectorstores import TencentVectorDB from langchain_community.vectorstores.tencentvectordb import ConnectionParams from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter =
CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
langchain_text_splitters.CharacterTextSplitter
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai context-python') import os from langchain.callbacks import ContextCallbackHandler token = os.environ["CONTEXT_API_TOKEN"] context_callback =
ContextCallbackHandler(token)
langchain.callbacks.ContextCallbackHandler
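A minimal sketch of attaching the handler to a chat model via callbacks; the message contents are assumptions.
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI

chat = ChatOpenAI(temperature=0, callbacks=[context_callback])
messages = [
    SystemMessage(content="You are a helpful assistant that translates English to French."),
    HumanMessage(content="I love programming."),
]
print(chat(messages))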
get_ipython().run_line_magic('pip', 'install -U --quiet langchain langchain_community openai chromadb langchain-experimental') get_ipython().run_line_magic('pip', 'install --quiet "unstructured[all-docs]" pypdf pillow pydantic lxml pillow matplotlib chromadb tiktoken') import logging import zipfile import requests logging.basicConfig(level=logging.INFO) data_url = "https://storage.googleapis.com/benchmarks-artifacts/langchain-docs-benchmarking/cj.zip" result = requests.get(data_url) filename = "cj.zip" with open(filename, "wb") as file: file.write(result.content) with zipfile.ZipFile(filename, "r") as zip_ref: zip_ref.extractall() from langchain_community.document_loaders import PyPDFLoader loader = PyPDFLoader("./cj/cj.pdf") docs = loader.load() tables = [] texts = [d.page_content for d in docs] len(texts) from langchain.prompts import PromptTemplate from langchain_community.chat_models import ChatVertexAI from langchain_community.llms import VertexAI from langchain_core.messages import AIMessage from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import RunnableLambda def generate_text_summaries(texts, tables, summarize_texts=False): """ Summarize text elements texts: List of str tables: List of str summarize_texts: Bool to summarize texts """ prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \ These summaries will be embedded and used to retrieve the raw text or table elements. \ Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """ prompt = PromptTemplate.from_template(prompt_text) empty_response = RunnableLambda( lambda x: AIMessage(content="Error processing document") ) model = VertexAI( temperature=0, model_name="gemini-pro", max_output_tokens=1024 ).with_fallbacks([empty_response]) summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() text_summaries = [] table_summaries = [] if texts and summarize_texts: text_summaries = summarize_chain.batch(texts, {"max_concurrency": 1}) elif texts: text_summaries = texts if tables: table_summaries = summarize_chain.batch(tables, {"max_concurrency": 1}) return text_summaries, table_summaries text_summaries, table_summaries = generate_text_summaries( texts, tables, summarize_texts=True ) len(text_summaries) import base64 import os from langchain_core.messages import HumanMessage def encode_image(image_path): """Getting the base64 string""" with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") def image_summarize(img_base64, prompt): """Make image summary""" model = ChatVertexAI(model_name="gemini-pro-vision", max_output_tokens=1024) msg = model( [ HumanMessage( content=[ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}, }, ] ) ] ) return msg.content def generate_img_summaries(path): """ Generate summaries and base64 encoded strings for images path: Path to list of .jpg files extracted by Unstructured """ img_base64_list = [] image_summaries = [] prompt = """You are an assistant tasked with summarizing images for retrieval. \ These summaries will be embedded and used to retrieve the raw image. 
\ Give a concise summary of the image that is well optimized for retrieval.""" for img_file in sorted(os.listdir(path)): if img_file.endswith(".jpg"): img_path = os.path.join(path, img_file) base64_image = encode_image(img_path) img_base64_list.append(base64_image) image_summaries.append(image_summarize(base64_image, prompt)) return img_base64_list, image_summaries img_base64_list, image_summaries = generate_img_summaries("./cj") len(image_summaries) import uuid from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore from langchain_community.embeddings import VertexAIEmbeddings from langchain_community.vectorstores import Chroma from langchain_core.documents import Document def create_multi_vector_retriever( vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images ): """ Create retriever that indexes summaries, but returns raw images or texts """ store = InMemoryStore() id_key = "doc_id" retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key, ) def add_documents(retriever, doc_summaries, doc_contents): doc_ids = [str(uuid.uuid4()) for _ in doc_contents] summary_docs = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(doc_summaries) ] retriever.vectorstore.add_documents(summary_docs) retriever.docstore.mset(list(zip(doc_ids, doc_contents))) if text_summaries: add_documents(retriever, text_summaries, texts) if table_summaries: add_documents(retriever, table_summaries, tables) if image_summaries: add_documents(retriever, image_summaries, images) return retriever vectorstore = Chroma( collection_name="mm_rag_cj_blog", embedding_function=VertexAIEmbeddings(model_name="textembedding-gecko@latest"), ) retriever_multi_vector_img = create_multi_vector_retriever( vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, img_base64_list, ) import io import re from IPython.display import HTML, display from langchain_core.runnables import RunnableLambda, RunnablePassthrough from PIL import Image def plt_img_base64(img_base64): """Display base64 encoded string as image""" image_html = f'<img src="data:image/jpeg;base64,{img_base64}" />' display(HTML(image_html)) def looks_like_base64(sb): """Check if the string looks like base64""" return re.match("^[A-Za-z0-9+/]+[=]{0,2}$", sb) is not None def is_image_data(b64data): """ Check if the base64 data is an image by looking at the start of the data """ image_signatures = { b"\xFF\xD8\xFF": "jpg", b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A": "png", b"\x47\x49\x46\x38": "gif", b"\x52\x49\x46\x46": "webp", } try: header = base64.b64decode(b64data)[:8] # Decode and get the first 8 bytes for sig, format in image_signatures.items(): if header.startswith(sig): return True return False except Exception: return False def resize_base64_image(base64_string, size=(128, 128)): """ Resize an image encoded as a Base64 string """ img_data = base64.b64decode(base64_string) img = Image.open(io.BytesIO(img_data)) resized_img = img.resize(size, Image.LANCZOS) buffered = io.BytesIO() resized_img.save(buffered, format=img.format) return base64.b64encode(buffered.getvalue()).decode("utf-8") def split_image_text_types(docs): """ Split base64-encoded images and texts """ b64_images = [] texts = [] for doc in docs: if isinstance(doc, Document): doc = doc.page_content if looks_like_base64(doc) and is_image_data(doc): doc = resize_base64_image(doc, size=(1300, 600)) b64_images.append(doc) else: texts.append(doc) if len(b64_images) > 0: return {"images": b64_images[:1], "texts": []} return {"images": b64_images, "texts": texts} def img_prompt_func(data_dict): """ Join the context into a single string """ formatted_texts = "\n".join(data_dict["context"]["texts"]) messages = [] text_message = { "type": "text", "text": ( "You are a financial analyst tasked with providing investment advice.\n" "You will be given a mix of text, tables, and image(s) usually of charts or graphs.\n" "Use this information to provide investment advice related to the user question. \n" f"User-provided question: {data_dict['question']}\n\n" "Text and / or tables:\n" f"{formatted_texts}" ), } messages.append(text_message) if data_dict["context"]["images"]: for image in data_dict["context"]["images"]: image_message = { "type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image}"}, } messages.append(image_message) return [
HumanMessage(content=messages)
langchain_core.messages.HumanMessage
from langchain.indexes import SQLRecordManager, index from langchain_core.documents import Document from langchain_elasticsearch import ElasticsearchStore from langchain_openai import OpenAIEmbeddings collection_name = "test_index" embedding = OpenAIEmbeddings() vectorstore = ElasticsearchStore( es_url="http://localhost:9200", index_name="test_index", embedding=embedding ) namespace = f"elasticsearch/{collection_name}" record_manager = SQLRecordManager( namespace, db_url="sqlite:///record_manager_cache.sql" ) record_manager.create_schema() doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"}) doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"}) def _clear(): """Hacky helper method to clear content. See the `full` mode section to understand why it works.""" index([], record_manager, vectorstore, cleanup="full", source_id_key="source") _clear() index( [doc1, doc1, doc1, doc1, doc1], record_manager, vectorstore, cleanup=None, source_id_key="source", ) _clear() index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source") index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source") _clear() index( [doc1, doc2], record_manager, vectorstore, cleanup="incremental", source_id_key="source", ) index( [doc1, doc2], record_manager, vectorstore, cleanup="incremental", source_id_key="source", )
index([], record_manager, vectorstore, cleanup="incremental", source_id_key="source")
langchain.indexes.index
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_openai.chat_models import ChatOpenAI model = ChatOpenAI() prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're an assistant who's good at {ability}. Respond in 20 words or fewer", ), MessagesPlaceholder(variable_name="history"), ("human", "{input}"), ] ) runnable = prompt | model from langchain_community.chat_message_histories import ChatMessageHistory from langchain_core.chat_history import BaseChatMessageHistory from langchain_core.runnables.history import RunnableWithMessageHistory store = {} def get_session_history(session_id: str) -> BaseChatMessageHistory: if session_id not in store: store[session_id] = ChatMessageHistory() return store[session_id] with_message_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", ) with_message_history.invoke( {"ability": "math", "input": "What does cosine mean?"}, config={"configurable": {"session_id": "abc123"}}, ) with_message_history.invoke( {"ability": "math", "input": "What?"}, config={"configurable": {"session_id": "abc123"}}, ) with_message_history.invoke( {"ability": "math", "input": "What?"}, config={"configurable": {"session_id": "def234"}}, ) from langchain_core.runnables import ConfigurableFieldSpec store = {} def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory: if (user_id, conversation_id) not in store: store[(user_id, conversation_id)] = ChatMessageHistory() return store[(user_id, conversation_id)] with_message_history = RunnableWithMessageHistory( runnable, get_session_history, input_messages_key="input", history_messages_key="history", history_factory_config=[ ConfigurableFieldSpec( id="user_id", annotation=str, name="User ID", description="Unique identifier for the user.", default="", is_shared=True, ), ConfigurableFieldSpec( id="conversation_id", annotation=str, name="Conversation ID", description="Unique identifier for the conversation.", default="", is_shared=True, ), ], ) with_message_history.invoke( {"ability": "math", "input": "Hello"}, config={"configurable": {"user_id": "123", "conversation_id": "1"}}, ) from langchain_core.messages import HumanMessage from langchain_core.runnables import RunnableParallel chain = RunnableParallel({"output_message": ChatOpenAI()}) def get_session_history(session_id: str) -> BaseChatMessageHistory: if session_id not in store: store[session_id] = ChatMessageHistory() return store[session_id] with_message_history = RunnableWithMessageHistory( chain, get_session_history, output_messages_key="output_message", ) with_message_history.invoke( [HumanMessage(content="What did Simone de Beauvoir believe about free will")], config={"configurable": {"session_id": "baz"}}, ) with_message_history.invoke( [HumanMessage(content="How did this compare to Sartre")], config={"configurable": {"session_id": "baz"}}, ) RunnableWithMessageHistory(
ChatOpenAI()
langchain_openai.chat_models.ChatOpenAI
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken langchain-openai python-dotenv datasets langchain deeplake beautifulsoup4 html2text ragas') ORG_ID = "..." import getpass import os from langchain.chains import RetrievalQA from langchain.vectorstores.deeplake import DeepLake from langchain_openai import OpenAIChat, OpenAIEmbeddings os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API token: ") os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass( "Enter your ActiveLoop API token: " ) # Get your API token from https://app.activeloop.ai, click on your profile picture in the top right corner, and select "API Tokens" token = os.getenv("ACTIVELOOP_TOKEN") openai_embeddings = OpenAIEmbeddings() db = DeepLake( dataset_path=f"hub://{ORG_ID}/deeplake-docs-deepmemory", # org_id stands for your username or organization from activeloop embedding=openai_embeddings, runtime={"tensor_db": True}, token=token, read_only=False, ) from urllib.parse import urljoin import requests from bs4 import BeautifulSoup def get_all_links(url): response = requests.get(url) if response.status_code != 200: print(f"Failed to retrieve the page: {url}") return [] soup = BeautifulSoup(response.content, "html.parser") links = [ urljoin(url, a["href"]) for a in soup.find_all("a", href=True) if a["href"] ] return links base_url = "https://docs.deeplake.ai/en/latest/" all_links = get_all_links(base_url) from langchain.document_loaders import AsyncHtmlLoader loader = AsyncHtmlLoader(all_links) docs = loader.load() from langchain.document_transformers import Html2TextTransformer html2text = Html2TextTransformer() docs_transformed = html2text.transform_documents(docs) from langchain_text_splitters import RecursiveCharacterTextSplitter chunk_size = 4096 docs_new = [] text_splitter = RecursiveCharacterTextSplitter( chunk_size=chunk_size, ) for doc in docs_transformed: if len(doc.page_content) < chunk_size: docs_new.append(doc) else: docs = text_splitter.create_documents([doc.page_content]) docs_new.extend(docs) docs = db.add_documents(docs_new) from typing import List from langchain.chains.openai_functions import ( create_structured_output_chain, ) from langchain_core.messages import HumanMessage, SystemMessage from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate from langchain_openai import ChatOpenAI from pydantic import BaseModel, Field docs = db.vectorstore.dataset.text.data(fetch_chunks=True, aslist=True)["value"] ids = db.vectorstore.dataset.id.data(fetch_chunks=True, aslist=True)["value"] llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) class Questions(BaseModel): """Identifying information about a person.""" question: str = Field(..., description="Questions about text") prompt_msgs = [ SystemMessage( content="You are a world class expert for generating questions based on provided context. \ You make sure the question can be answered by the text." ), HumanMessagePromptTemplate.from_template( "Use the given text to generate a question from the following input: {input}" ), HumanMessage(content="Tips: Make sure to answer in the correct format"), ] prompt = ChatPromptTemplate(messages=prompt_msgs) chain = create_structured_output_chain(Questions, llm, prompt, verbose=True) text = "# Understanding Hallucinations and Bias ## **Introduction** In this lesson, we'll cover the concept of **hallucinations** in LLMs, highlighting their influence on AI applications and demonstrating how to mitigate them using techniques like the retriever's architectures. 
We'll also explore **bias** within LLMs with examples." questions = chain.run(input=text) print(questions) import random from langchain_openai import OpenAIEmbeddings from tqdm import tqdm def generate_queries(docs: List[str], ids: List[str], n: int = 100): questions = [] relevances = [] pbar = tqdm(total=n) while len(questions) < n: r = random.randint(0, len(docs) - 1) text, label = docs[r], ids[r] generated_qs = [chain.run(input=text).question] questions.extend(generated_qs) relevances.extend([[(label, 1)] for _ in generated_qs]) pbar.update(len(generated_qs)) if len(questions) % 10 == 0: print(f"q: {len(questions)}") return questions[:n], relevances[:n] chain = create_structured_output_chain(Questions, llm, prompt, verbose=False) questions, relevances = generate_queries(docs, ids, n=200) train_questions, train_relevances = questions[:100], relevances[:100] test_questions, test_relevances = questions[100:], relevances[100:] job_id = db.vectorstore.deep_memory.train( queries=train_questions, relevance=train_relevances, ) db.vectorstore.deep_memory.status("6538939ca0b69a9ca45c528c") recall = db.vectorstore.deep_memory.evaluate( queries=test_questions, relevance=test_relevances, ) from ragas.langchain import RagasEvaluatorChain from ragas.metrics import ( context_recall, ) def convert_relevance_to_ground_truth(docs, relevance): ground_truths = [] for rel in relevance: ground_truth = [] for doc_id, _ in rel: ground_truth.append(docs[doc_id]) ground_truths.append(ground_truth) return ground_truths ground_truths = convert_relevance_to_ground_truth(docs, test_relevances) for deep_memory in [False, True]: print("\nEvaluating with deep_memory =", deep_memory) print("===================================") retriever = db.as_retriever() retriever.search_kwargs["deep_memory"] = deep_memory qa_chain = RetrievalQA.from_chain_type( llm=OpenAIChat(model="gpt-3.5-turbo"), chain_type="stuff", retriever=retriever, return_source_documents=True, ) metrics = { "context_recall_score": 0, } eval_chains = {m.name: RagasEvaluatorChain(metric=m) for m in [context_recall]} for question, ground_truth in zip(test_questions, ground_truths): result = qa_chain({"query": question}) result["ground_truths"] = ground_truth for name, eval_chain in eval_chains.items(): score_name = f"{name}_score" metrics[score_name] += eval_chain(result)[score_name] for metric in metrics: metrics[metric] /= len(test_questions) print(f"{metric}: {metrics[metric]}") print("===================================") retriever = db.as_retriever() retriever.search_kwargs["deep_memory"] = True retriever.search_kwargs["k"] = 10 query = "Deamination of cytidine to uridine on the minus strand of viral DNA results in catastrophic G-to-A mutations in the viral genome." qa = RetrievalQA.from_chain_type( llm=OpenAIChat(model="gpt-4"), chain_type="stuff", retriever=retriever ) print(qa.run(query)) retriever = db.as_retriever() retriever.search_kwargs["deep_memory"] = False retriever.search_kwargs["k"] = 10 query = "Deamination of cytidine to uridine on the minus strand of viral DNA results in catastrophic G-to-A mutations in the viral genome." qa = RetrievalQA.from_chain_type( llm=
OpenAIChat(model="gpt-4")
langchain_openai.OpenAIChat
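# Hedged sketch (not part of the row above): mirroring the deep_memory=True
# block earlier in this snippet, the truncated call would typically be
# completed and run like this.
qa = RetrievalQA.from_chain_type(
    llm=OpenAIChat(model="gpt-4"), chain_type="stuff", retriever=retriever
)
print(qa.run(query))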
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain.model_laboratory import ModelLaboratory from langchain.prompts import PromptTemplate from langchain_community.llms import Cohere, HuggingFaceHub from langchain_openai import OpenAI import getpass import os os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:") os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass.getpass("Hugging Face API Key:") llms = [ OpenAI(temperature=0),
Cohere(temperature=0)
langchain_community.llms.Cohere
import os from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import ForefrontAI from getpass import getpass FOREFRONTAI_API_KEY = getpass() os.environ["FOREFRONTAI_API_KEY"] = FOREFRONTAI_API_KEY llm =
ForefrontAI(endpoint_url="YOUR ENDPOINT URL HERE")
langchain_community.llms.ForefrontAI
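# Minimal sketch reusing the LLMChain/PromptTemplate imports above; the
# question text is illustrative.
template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm_chain = LLMChain(prompt=prompt, llm=llm)
llm_chain.run("What NFL team won the Super Bowl in the year Justin Bieber was born?")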
get_ipython().run_line_magic('pip', 'install --upgrade --quiet youtube_search') from langchain.tools import YouTubeSearchTool tool =
YouTubeSearchTool()
langchain.tools.YouTubeSearchTool
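# Usage sketch: the tool takes a comma-separated "query,num_results" string
# (the result count is optional); the query is illustrative.
tool.run("lex fridman")
tool.run("lex fridman,5")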
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pipeline-ai') import os from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import PipelineAI os.environ["PIPELINE_API_KEY"] = "YOUR_API_KEY_HERE" llm = PipelineAI(pipeline_key="YOUR_PIPELINE_KEY", pipeline_kwargs={...}) template = """Question: {question} Answer: Let's think step by step.""" prompt =
PromptTemplate.from_template(template)
langchain.prompts.PromptTemplate.from_template
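# Likely continuation (a sketch): run the prompt through the PipelineAI llm.
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)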
import re from typing import Union from langchain.agents import ( AgentExecutor, AgentOutputParser, LLMSingleActionAgent, Tool, ) from langchain.chains import LLMChain from langchain.prompts import StringPromptTemplate from langchain_community.utilities import SerpAPIWrapper from langchain_core.agents import AgentAction, AgentFinish from langchain_openai import OpenAI search =
SerpAPIWrapper()
langchain_community.utilities.SerpAPIWrapper
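# Sketch of the next step in this custom-agent example: expose the search
# wrapper as a Tool the agent can call.
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful for when you need to answer questions about current events",
    )
]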
model_url = "http://localhost:5000" from langchain.chains import LLMChain from langchain.globals import set_debug from langchain.prompts import PromptTemplate from langchain_community.llms import TextGen
set_debug(True)
langchain.globals.set_debug
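# Minimal sketch, assuming a text-generation-webui server is reachable at
# model_url; the question is illustrative.
template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = TextGen(model_url=model_url)
llm_chain = LLMChain(prompt=prompt, llm=llm)
llm_chain.run("What NFL team won the Super Bowl in the year Justin Bieber was born?")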
meals = [ "Beef Enchiladas with Feta cheese. Mexican-Greek fusion", "Chicken Flatbreads with red sauce. Italian-Mexican fusion", "Veggie sweet potato quesadillas with vegan cheese", "One-Pan Tortelonni bake with peppers and onions", ] from langchain_openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo-instruct") from langchain.prompts import PromptTemplate PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}". Embed the meal into the given text: "{text_to_personalize}". Prepend a personalized message including the user's name "{user}" and their preference "{preference}". Make it sound good. """ PROMPT = PromptTemplate( input_variables=["meal", "text_to_personalize", "user", "preference"], template=PROMPT_TEMPLATE, ) import langchain_experimental.rl_chain as rl_chain chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs \ believe you will love it!", ) print(response["response"]) for _ in range(5): try: response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) except Exception as e: print(e) print(response["response"]) print() scoring_criteria_template = ( "Given {preference} rank how good or bad this selection is {meal}" ) chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=rl_chain.AutoSelectionScorer( llm=llm, scoring_criteria_template_str=scoring_criteria_template ), ) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) print(response["response"]) selection_metadata = response["selection_metadata"] print( f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}" ) class CustomSelectionScorer(rl_chain.SelectionScorer): def score_response( self, inputs, llm_response: str, event: rl_chain.PickBestEvent ) -> float: print(event.based_on) print(event.to_select_from) selected_meal = event.to_select_from["meal"][event.selected.index] print(f"selected meal: {selected_meal}") if "Tom" in event.based_on["user"]: if "Vegetarian" in event.based_on["preference"]: if "Chicken" in selected_meal or "Beef" in selected_meal: return 0.0 else: return 1.0 else: if "Chicken" in selected_meal or "Beef" in selected_meal: return 1.0 else: return 0.0 else: raise NotImplementedError("I don't know how to score this user") chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), ) response = chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) class CustomSelectionScorer(rl_chain.SelectionScorer): def score_preference(self, preference, selected_meal): if "Vegetarian" in preference: if "Chicken" in selected_meal or "Beef" in selected_meal: return 0.0 else: return 1.0 else: if "Chicken" in selected_meal or "Beef" in selected_meal: return 1.0 else: return 0.0 def score_response( self, 
inputs, llm_response: str, event: rl_chain.PickBestEvent ) -> float: selected_meal = event.to_select_from["meal"][event.selected.index] if "Tom" in event.based_on["user"]: return self.score_preference(event.based_on["preference"], selected_meal) elif "Anna" in event.based_on["user"]: return self.score_preference(event.based_on["preference"], selected_meal) else: raise NotImplementedError("I don't know how to score this user") chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), metrics_step=5, metrics_window_size=5, # rolling window average ) random_chain = rl_chain.PickBest.from_llm( llm=llm, prompt=PROMPT, selection_scorer=CustomSelectionScorer(), metrics_step=5, metrics_window_size=5, # rolling window average policy=rl_chain.PickBestRandomPolicy, # set the random policy instead of default ) for _ in range(20): try: chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) random_chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Tom"), preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]), text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!", ) chain.run( meal=rl_chain.ToSelectFrom(meals), user=rl_chain.BasedOn("Anna"), preference=
rl_chain.BasedOn(["Loves meat", "especially beef"])
langchain_experimental.rl_chain.BasedOn
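# Hedged sketch of what typically follows the training loop: compare the
# learned policy with the random baseline via their rolling-average scores
# (assumes matplotlib is installed).
from matplotlib import pyplot as plt

chain.metrics.to_pandas()["score"].plot(label="default learning policy")
random_chain.metrics.to_pandas()["score"].plot(label="random selection policy")
plt.legend()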
get_ipython().run_line_magic('pip', 'install xmltodict') from langchain_community.tools.pubmed.tool import PubmedQueryRun tool =
PubmedQueryRun()
langchain_community.tools.pubmed.tool.PubmedQueryRun
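# Usage sketch; the query is illustrative.
tool.invoke("What causes lung cancer?")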
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain.prompts import PromptTemplate from langchain_core.runnables import ConfigurableField from langchain_openai import ChatOpenAI model = ChatOpenAI(temperature=0).configurable_fields( temperature=ConfigurableField( id="llm_temperature", name="LLM Temperature", description="The temperature of the LLM", ) ) model.invoke("pick a random number") model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number") prompt = PromptTemplate.from_template("Pick a random number above {x}") chain = prompt | model chain.invoke({"x": 0}) chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0}) from langchain.runnables.hub import HubRunnable prompt = HubRunnable("rlm/rag-prompt").configurable_fields( owner_repo_commit=ConfigurableField( id="hub_commit", name="Hub Commit", description="The Hub commit to pull from", ) ) prompt.invoke({"question": "foo", "context": "bar"}) prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke( {"question": "foo", "context": "bar"} ) from langchain.prompts import PromptTemplate from langchain_community.chat_models import ChatAnthropic from langchain_core.runnables import ConfigurableField from langchain_openai import ChatOpenAI llm = ChatAnthropic(temperature=0).configurable_alternatives( ConfigurableField(id="llm"), default_key="anthropic", openai=ChatOpenAI(), gpt4=
ChatOpenAI(model="gpt-4")
langchain_openai.ChatOpenAI
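# Sketch (assumes the configurable_alternatives(...) call above is closed with
# its trailing parenthesis): pick an alternative by key at invocation time.
prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
chain = prompt | llm
chain.invoke({"topic": "bears"})  # uses the default key, "anthropic"
chain.with_config(configurable={"llm": "openai"}).invoke({"topic": "bears"})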
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_core.runnables import RunnablePassthrough from langchain_openai import ChatOpenAI template = """Answer the users question based only on the following context: <context> {context} </context> Question: {question} """ prompt = ChatPromptTemplate.from_template(template) model = ChatOpenAI(temperature=0) search = DuckDuckGoSearchAPIWrapper() def retriever(query): return search.run(query) chain = ( {"context": retriever, "question": RunnablePassthrough()} | prompt | model | StrOutputParser() ) simple_query = "what is langchain?" chain.invoke(simple_query) distracted_query = "man that sam bankman fried trial was crazy! what is langchain?" chain.invoke(distracted_query) retriever(distracted_query) template = """Provide a better search query for \ web search engine to answer the given question, end \ the queries with ’**’. Question: \ {x} Answer:""" rewrite_prompt = ChatPromptTemplate.from_template(template) from langchain import hub rewrite_prompt = hub.pull("langchain-ai/rewrite") print(rewrite_prompt.template) def _parse(text): return text.strip("**") rewriter = rewrite_prompt | ChatOpenAI(temperature=0) |
StrOutputParser()
langchain_core.output_parsers.StrOutputParser
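# Hedged sketch of the full rewrite-retrieve-read chain these pieces build
# toward (assumes _parse is piped in to strip the '**' markers): rewrite the
# question, retrieve on the rewritten query, then answer.
rewrite_retrieve_read_chain = (
    {
        "context": {"x": RunnablePassthrough()} | rewriter | _parse | retriever,
        "question": RunnablePassthrough(),
    }
    | prompt
    | model
    | StrOutputParser()
)
rewrite_retrieve_read_chain.invoke(distracted_query)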
from langchain_community.vectorstores import Bagel texts = ["hello bagel", "hello langchain", "I love salad", "my car", "a dog"] cluster = Bagel.from_texts(cluster_name="testing", texts=texts) cluster.similarity_search("bagel", k=3) cluster.similarity_search_with_score("bagel", k=3) cluster.delete_cluster() from langchain_community.document_loaders import TextLoader from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents)[:10] cluster = Bagel.from_documents(cluster_name="testing_with_docs", documents=docs) query = "What did the president say about Ketanji Brown Jackson" docs = cluster.similarity_search(query) print(docs[0].page_content[:102]) texts = ["hello bagel", "this is langchain"] cluster =
Bagel.from_texts(cluster_name="testing", texts=texts)
langchain_community.vectorstores.Bagel.from_texts
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pipeline-ai') import os from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain_community.llms import PipelineAI os.environ["PIPELINE_API_KEY"] = "YOUR_API_KEY_HERE" llm =
PipelineAI(pipeline_key="YOUR_PIPELINE_KEY", pipeline_kwargs={...})
langchain_community.llms.PipelineAI
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai') from langchain.model_laboratory import ModelLaboratory from langchain.prompts import PromptTemplate from langchain_community.llms import Cohere, HuggingFaceHub from langchain_openai import OpenAI import getpass import os os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:") os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass.getpass("Hugging Face API Key:") llms = [ OpenAI(temperature=0), Cohere(temperature=0), HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1}), ] model_lab = ModelLaboratory.from_llms(llms) model_lab.compare("What color is a flamingo?") prompt = PromptTemplate( template="What is the capital of {state}?", input_variables=["state"] ) model_lab_with_prompt =
ModelLaboratory.from_llms(llms, prompt=prompt)
langchain.model_laboratory.ModelLaboratory.from_llms
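# Usage sketch: compare the same templated question across all three models.
model_lab_with_prompt.compare("New York")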
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass() os.environ["ANTHROPIC_API_KEY"] = getpass.getpass() from langchain_community.retrievers import WikipediaRetriever from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000) prompt = ChatPromptTemplate.from_messages( [ ( "system", "You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}", ), ("human", "{question}"), ] ) prompt.pretty_print() from operator import itemgetter from typing import List from langchain_core.documents import Document from langchain_core.output_parsers import StrOutputParser from langchain_core.runnables import ( RunnableLambda, RunnableParallel, RunnablePassthrough, ) def format_docs(docs: List[Document]) -> str: """Convert Documents to a single string.:""" formatted = [ f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}" for doc in docs ] return "\n\n" + "\n\n".join(formatted) format = itemgetter("docs") |
RunnableLambda(format_docs)
langchain_core.runnables.RunnableLambda
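# Sketch of how these pieces are assembled into a citation-friendly chain:
# retrieve, format the docs into the prompt, and return the answer together
# with its source documents.
answer = prompt | llm | StrOutputParser()
chain = (
    RunnableParallel(question=RunnablePassthrough(), docs=wiki)
    .assign(context=format)
    .assign(answer=answer)
    .pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")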
get_ipython().system('pip install termcolor > /dev/null') import logging logging.basicConfig(level=logging.ERROR) from datetime import datetime, timedelta from typing import List from langchain.docstore import InMemoryDocstore from langchain.retrievers import TimeWeightedVectorStoreRetriever from langchain_community.vectorstores import FAISS from langchain_openai import ChatOpenAI, OpenAIEmbeddings from termcolor import colored USER_NAME = "Person A" # The name you want to use when interviewing the agent. LLM = ChatOpenAI(max_tokens=1500) # Can be any LLM you want. from langchain_experimental.generative_agents import ( GenerativeAgent, GenerativeAgentMemory, ) import math import faiss def relevance_score_fn(score: float) -> float: """Return a similarity score on a scale [0, 1].""" return 1.0 - score / math.sqrt(2) def create_new_memory_retriever(): """Create a new vector store retriever unique to the agent.""" embeddings_model =
OpenAIEmbeddings()
langchain_openai.OpenAIEmbeddings
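# Hedged sketch of how create_new_memory_retriever() typically continues in the
# generative-agents example: an in-memory FAISS index sized for OpenAI
# embeddings (1536 dimensions), wrapped in a time-weighted retriever.
    embedding_size = 1536
    index = faiss.IndexFlatL2(embedding_size)
    vectorstore = FAISS(
        embeddings_model.embed_query,
        index,
        InMemoryDocstore({}),
        {},
        relevance_score_fn=relevance_score_fn,
    )
    return TimeWeightedVectorStoreRetriever(
        vectorstore=vectorstore, other_score_keys=["importance"], k=15
    )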
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic') import os import boto3 comprehend_client = boto3.client("comprehend", region_name="us-east-1") from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain comprehend_moderation = AmazonComprehendModerationChain( client=comprehend_client, verbose=True, # optional ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( ModerationPiiError, ) template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comprehend_moderation | {"input": (lambda x: x["output"]) | llm} | comprehend_moderation ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?" } ) except ModerationPiiError as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import ( BaseModerationConfig, ModerationPiiConfig, ModerationPromptSafetyConfig, ModerationToxicityConfig, ) pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5) moderation_config = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-45-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) from langchain_experimental.comprehend_moderation import BaseModerationCallbackHandler class MyModCallback(BaseModerationCallbackHandler): async def on_after_pii(self, output_beacon, unique_id): import json moderation_type = output_beacon["moderation_type"] chain_id = output_beacon["moderation_chain_id"] with open(f"output-{moderation_type}-{chain_id}.json", "w") as file: data = {"beacon_data": output_beacon, "unique_id": unique_id} json.dump(data, file) """ async def on_after_toxicity(self, output_beacon, unique_id): pass async def on_after_prompt_safety(self, output_beacon, unique_id): pass """ my_callback = MyModCallback() pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X") toxicity_config = ModerationToxicityConfig(threshold=0.5) moderation_config = BaseModerationConfig(filters=[pii_config, toxicity_config]) comp_moderation_with_config = AmazonComprehendModerationChain( moderation_config=moderation_config, # specify the configuration client=comprehend_client, # optionally pass the Boto3 Client unique_id="[email protected]", # A unique ID moderation_callback=my_callback, # BaseModerationCallbackHandler verbose=True, ) from langchain.prompts import PromptTemplate from langchain_community.llms.fake import FakeListLLM template = """Question: {question} Answer:""" prompt = PromptTemplate.from_template(template) responses = [ "Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.", "Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.", ] llm = FakeListLLM(responses=responses) chain = ( prompt | comp_moderation_with_config | {"input": (lambda x: x["output"]) | llm} | comp_moderation_with_config ) try: response = chain.invoke( { "question": "A sample SSN number looks like this 123-456-7890. Can you give me some more samples?" 
} ) except Exception as e: print(str(e)) else: print(response["output"]) get_ipython().run_line_magic('pip', 'install --upgrade --quiet huggingface_hub') import os os.environ["HUGGINGFACEHUB_API_TOKEN"] = "<YOUR HF TOKEN HERE>" repo_id = "google/flan-t5-xxl" from langchain.prompts import PromptTemplate from langchain_community.llms import HuggingFaceHub template = """{question}""" prompt = PromptTemplate.from_template(template) llm = HuggingFaceHub( repo_id=repo_id, model_kwargs={"temperature": 0.5, "max_length": 256} ) pii_config = ModerationPiiConfig( labels=["SSN", "CREDIT_DEBIT_NUMBER"], redact=True, mask_character="X" ) toxicity_config = ModerationToxicityConfig(threshold=0.5) prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.8) moderation_config_1 = BaseModerationConfig( filters=[pii_config, toxicity_config, prompt_safety_config] ) moderation_config_2 = BaseModerationConfig(filters=[pii_config]) amazon_comp_moderation = AmazonComprehendModerationChain( moderation_config=moderation_config_1, client=comprehend_client, moderation_callback=my_callback, verbose=True, ) amazon_comp_moderation_out = AmazonComprehendModerationChain( moderation_config=moderation_config_2, client=comprehend_client, verbose=True ) chain = ( prompt | amazon_comp_moderation | {"input": (lambda x: x["output"]) | llm} | amazon_comp_moderation_out ) try: response = chain.invoke( { "question": """What is John Doe's address, phone number and SSN from the following text? John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at [email protected] reminding him of an old acquaintance's reunion. As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place. """ } ) except Exception as e: print(str(e)) else: print(response["output"]) endpoint_name = "<SAGEMAKER_ENDPOINT_NAME>" # replace with your SageMaker Endpoint name region = "<REGION>" # replace with your SageMaker Endpoint region import json from langchain.prompts import PromptTemplate from langchain_community.llms import SagemakerEndpoint from langchain_community.llms.sagemaker_endpoint import LLMContentHandler class ContentHandler(LLMContentHandler): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: dict) -> bytes: input_str = json.dumps({"text_inputs": prompt, **model_kwargs}) return input_str.encode("utf-8") def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json["generated_texts"][0] content_handler = ContentHandler() template = """From the following 'Document', precisely answer the 'Question'. Do not add any spurious information in your answer. Document: John Doe, a resident of 1234 Elm Street in Springfield, recently celebrated his birthday on January 1st. Turning 43 this year, John reflected on the years gone by. He often shares memories of his younger days with his close friends through calls on his phone, (555) 123-4567. Meanwhile, during a casual evening, he received an email at [email protected] reminding him of an old acquaintance's reunion. 
As he navigated through some old documents, he stumbled upon a paper that listed his SSN as 123-45-6789, reminding him to store it in a safer place. Question: {question} Answer: """ llm_prompt =
PromptTemplate.from_template(template)
langchain.prompts.PromptTemplate.from_template
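# Hedged sketch: build the SagemakerEndpoint LLM that the content handler above
# is written for; the model_kwargs values are illustrative.
from langchain.chains import LLMChain

llm = SagemakerEndpoint(
    endpoint_name=endpoint_name,
    region_name=region,
    model_kwargs={"temperature": 0.95, "max_length": 200},
    content_handler=content_handler,
)
llm_chain = LLMChain(prompt=llm_prompt, llm=llm)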
get_ipython().run_line_magic('pip', 'install "pgvecto_rs[sdk]"') from typing import List from langchain.docstore.document import Document from langchain_community.document_loaders import TextLoader from langchain_community.embeddings.fake import FakeEmbeddings from langchain_community.vectorstores.pgvecto_rs import PGVecto_rs from langchain_text_splitters import CharacterTextSplitter loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings =
FakeEmbeddings(size=3)
langchain_community.embeddings.fake.FakeEmbeddings
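# Hedged sketch of the next step: load the split docs into pgvecto.rs; the
# connection URL is a placeholder for a local Postgres with the extension.
URL = "postgresql+psycopg://postgres:mysecretpassword@localhost:5432/postgres"
db = PGVecto_rs.from_documents(
    documents=docs,
    embedding=embeddings,
    db_url=URL,
    collection_name="state_of_the_union",
)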
from langchain.callbacks import get_openai_callback from langchain_openai import ChatOpenAI llm =
ChatOpenAI(model_name="gpt-4")
langchain_openai.ChatOpenAI
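# Usage sketch: the context manager tallies token usage and cost for every
# call made inside the block.
with get_openai_callback() as cb:
    result = llm.invoke("Tell me a joke")
    print(cb)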
get_ipython().run_line_magic('pip', 'install --upgrade --quiet unstructured') from langchain_community.document_loaders import UnstructuredEmailLoader loader =
UnstructuredEmailLoader("example_data/fake-email.eml")
langchain_community.document_loaders.UnstructuredEmailLoader
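# Usage sketch, including the "elements" mode variant that keeps chunks
# separate instead of combining them.
data = loader.load()
data[0]
loader = UnstructuredEmailLoader("example_data/fake-email.eml", mode="elements")
data = loader.load()
data[0]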
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymilvus') import getpass import os os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") from langchain_community.document_loaders import TextLoader from langchain_community.vectorstores import Milvus from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import CharacterTextSplitter from langchain_community.document_loaders import TextLoader loader = TextLoader("../../modules/state_of_the_union.txt") documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) docs = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() vector_db = Milvus.from_documents( docs, embeddings, connection_args={"host": "127.0.0.1", "port": "19530"}, ) query = "What did the president say about Ketanji Brown Jackson" docs = vector_db.similarity_search(query) docs[0].page_content vector_db = Milvus.from_documents( docs, embeddings, collection_name="collection_1", connection_args={"host": "127.0.0.1", "port": "19530"}, ) vector_db = Milvus( embeddings, connection_args={"host": "127.0.0.1", "port": "19530"}, collection_name="collection_1", ) from langchain_core.documents import Document docs = [ Document(page_content="i worked at kensho", metadata={"namespace": "harrison"}),
Document(page_content="i worked at facebook", metadata={"namespace": "ankush"})
langchain.docstore.document.Document
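# Hedged sketch of per-user retrieval with these documents: partition on the
# "namespace" metadata field, then filter at query time.
vectorstore = Milvus.from_documents(
    docs,
    embeddings,
    connection_args={"host": "127.0.0.1", "port": "19530"},
    drop_old=True,
    partition_key_field="namespace",  # use the field to partition the data
)
retriever = vectorstore.as_retriever(search_kwargs={"expr": 'namespace == "ankush"'})
retriever.get_relevant_documents("where did i work?")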
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet') import os from langchain_community.document_loaders import DocugamiLoader DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY") docset_id = "26xpy3aes7xp" document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"] loader =
DocugamiLoader(docset_id=docset_id, document_ids=document_ids)
langchain_community.document_loaders.DocugamiLoader
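# Usage sketch: each loaded chunk carries Docugami's semantic XML metadata
# (e.g. xpath, structure, tag).
documents = loader.load()
documents[0].metadata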
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)') get_ipython().system(' pip install "unstructured[all-docs]" pillow pydantic lxml pillow matplotlib chromadb tiktoken') from langchain_text_splitters import CharacterTextSplitter from unstructured.partition.pdf import partition_pdf def extract_pdf_elements(path, fname): """ Extract images, tables, and chunk text from a PDF file. path: File path, which is used to dump images (.jpg) fname: File name """ return partition_pdf( filename=path + fname, extract_images_in_pdf=False, infer_table_structure=True, chunking_strategy="by_title", max_characters=4000, new_after_n_chars=3800, combine_text_under_n_chars=2000, image_output_dir_path=path, ) def categorize_elements(raw_pdf_elements): """ Categorize extracted elements from a PDF into tables and texts. raw_pdf_elements: List of unstructured.documents.elements """ tables = [] texts = [] for element in raw_pdf_elements: if "unstructured.documents.elements.Table" in str(type(element)): tables.append(str(element)) elif "unstructured.documents.elements.CompositeElement" in str(type(element)): texts.append(str(element)) return texts, tables fpath = "/Users/rlm/Desktop/cj/" fname = "cj.pdf" raw_pdf_elements = extract_pdf_elements(fpath, fname) texts, tables = categorize_elements(raw_pdf_elements) text_splitter = CharacterTextSplitter.from_tiktoken_encoder( chunk_size=4000, chunk_overlap=0 ) joined_texts = " ".join(texts) texts_4k_token = text_splitter.split_text(joined_texts) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_openai import ChatOpenAI def generate_text_summaries(texts, tables, summarize_texts=False): """ Summarize text elements texts: List of str tables: List of str summarize_texts: Bool to summarize texts """ prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \ These summaries will be embedded and used to retrieve the raw text or table elements. \ Give a concise summary of the table or text that is well optimized for retrieval. 
Table or text: {element} """ prompt = ChatPromptTemplate.from_template(prompt_text) model = ChatOpenAI(temperature=0, model="gpt-4") summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser() text_summaries = [] table_summaries = [] if texts and summarize_texts: text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5}) elif texts: text_summaries = texts if tables: table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5}) return text_summaries, table_summaries text_summaries, table_summaries = generate_text_summaries( texts_4k_token, tables, summarize_texts=True ) import base64 import os from langchain_core.messages import HumanMessage def encode_image(image_path): """Getting the base64 string""" with open(image_path, "rb") as image_file: return base64.b64encode(image_file.read()).decode("utf-8") def image_summarize(img_base64, prompt): """Make image summary""" chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024) msg = chat.invoke( [ HumanMessage( content=[ {"type": "text", "text": prompt}, { "type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"}, }, ] ) ] ) return msg.content def generate_img_summaries(path): """ Generate summaries and base64 encoded strings for images path: Path to list of .jpg files extracted by Unstructured """ img_base64_list = [] image_summaries = [] prompt = """You are an assistant tasked with summarizing images for retrieval. \ These summaries will be embedded and used to retrieve the raw image. \ Give a concise summary of the image that is well optimized for retrieval.""" for img_file in sorted(os.listdir(path)): if img_file.endswith(".jpg"): img_path = os.path.join(path, img_file) base64_image = encode_image(img_path) img_base64_list.append(base64_image) image_summaries.append(image_summarize(base64_image, prompt)) return img_base64_list, image_summaries img_base64_list, image_summaries = generate_img_summaries(fpath) import uuid from langchain.retrievers.multi_vector import MultiVectorRetriever from langchain.storage import InMemoryStore from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings def create_multi_vector_retriever( vectorstore, text_summaries, texts, table_summaries, tables, image_summaries, images ): """ Create retriever that indexes summaries, but returns raw images or texts """ store = InMemoryStore() id_key = "doc_id" retriever = MultiVectorRetriever( vectorstore=vectorstore, docstore=store, id_key=id_key, ) def add_documents(retriever, doc_summaries, doc_contents): doc_ids = [str(uuid.uuid4()) for _ in doc_contents] summary_docs = [ Document(page_content=s, metadata={id_key: doc_ids[i]}) for i, s in enumerate(doc_summaries) ] retriever.vectorstore.add_documents(summary_docs) retriever.docstore.mset(list(zip(doc_ids, doc_contents))) if text_summaries: add_documents(retriever, text_summaries, texts) if table_summaries: add_documents(retriever, table_summaries, tables) if image_summaries: add_documents(retriever, image_summaries, images) return retriever vectorstore = Chroma( collection_name="mm_rag_cj_blog", embedding_function=
OpenAIEmbeddings()
langchain_openai.OpenAIEmbeddings
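# Sketch completing the setup: build the retriever over the summaries while
# storing the raw texts, tables, and base64-encoded images.
retriever_multi_vector_img = create_multi_vector_retriever(
    vectorstore,
    text_summaries,
    texts,
    table_summaries,
    tables,
    image_summaries,
    img_base64_list,
)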
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints') import getpass import os if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"): nvapi_key = getpass.getpass("Enter your NVIDIA API key: ") assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key" os.environ["NVIDIA_API_KEY"] = nvapi_key from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="mixtral_8x7b") result = llm.invoke("Write a ballad about LangChain.") print(result.content) print(llm.batch(["What's 2*3?", "What's 2*6?"])) for chunk in llm.stream("How far can a seagull fly in one day?"): print(chunk.content, end="|") async for chunk in llm.astream( "How long does it take for monarch butterflies to migrate?" ): print(chunk.content, end="|") ChatNVIDIA.get_available_models() from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser() for txt in chain.stream({"input": "What's your name?"}): print(txt, end="") prompt = ChatPromptTemplate.from_messages( [ ( "system", "You are an expert coding AI. Respond only in valid python; no narration whatsoever.", ), ("user", "{input}"), ] ) chain = prompt | ChatNVIDIA(model="llama2_code_70b") | StrOutputParser() for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}): print(txt, end="") from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="nemotron_steerlm_8b") complex_result = llm.invoke( "What's a PB&J?", labels={"creativity": 0, "complexity": 3, "verbosity": 0} ) print("Un-creative\n") print(complex_result.content) print("\n\nCreative\n") creative_result = llm.invoke( "What's a PB&J?", labels={"creativity": 9, "complexity": 3, "verbosity": 9} ) print(creative_result.content) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")] ) chain = ( prompt | ChatNVIDIA(model="nemotron_steerlm_8b").bind( labels={"creativity": 9, "complexity": 0, "verbosity": 9} ) | StrOutputParser() ) for txt in chain.stream({"input": "Why is a PB&J?"}): print(txt, end="") import IPython import requests image_url = "https://www.nvidia.com/content/dam/en-zz/Solutions/research/ai-playground/[email protected]" ## Large Image image_content = requests.get(image_url).content IPython.display.Image(image_content) from langchain_nvidia_ai_endpoints import ChatNVIDIA llm = ChatNVIDIA(model="playground_neva_22b") from langchain_core.messages import HumanMessage llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ] ) from langchain_core.messages import HumanMessage llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ], labels={"creativity": 0, "quality": 9, "complexity": 0, "verbosity": 0}, ) import IPython import requests image_url = "https://picsum.photos/seed/kitten/300/200" image_content = requests.get(image_url).content IPython.display.Image(image_content) import base64 from 
langchain_core.messages import HumanMessage b64_string = base64.b64encode(image_content).decode("utf-8") llm.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, { "type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64_string}"}, }, ] ) ] ) base64_with_mime_type = f"data:image/png;base64,{b64_string}" llm.invoke(f'What\'s in this image?\n<img src="{base64_with_mime_type}" />') from langchain_nvidia_ai_endpoints import ChatNVIDIA kosmos = ChatNVIDIA(model="kosmos_2") from langchain_core.messages import HumanMessage def drop_streaming_key(d): """Takes in payload dictionary, outputs new payload dictionary""" if "stream" in d: d.pop("stream") return d kosmos = ChatNVIDIA(model="kosmos_2") kosmos.client.payload_fn = drop_streaming_key kosmos.invoke( [ HumanMessage( content=[ {"type": "text", "text": "Describe this image:"}, {"type": "image_url", "image_url": {"url": image_url}}, ] ) ] ) import base64 from io import BytesIO from PIL import Image img_gen = ChatNVIDIA(model="sdxl_turbo") def to_sdxl_payload(d): if d: d = {"prompt": d.get("messages", [{}])[0].get("content")} d["inference_steps"] = 4 ## why not add another argument? return d img_gen.client.payload_fn = to_sdxl_payload def to_pil_img(d): return Image.open(BytesIO(base64.b64decode(d))) (img_gen | StrOutputParser() | to_pil_img).invoke("white cat playing") from langchain_core.messages import ChatMessage from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate from langchain_nvidia_ai_endpoints import ChatNVIDIA prompt = ChatPromptTemplate.from_messages( [ ChatMessage( role="context", content="Parrots and Cats have signed the peace accord." ), ("user", "{input}"), ] ) llm =
ChatNVIDIA(model="nemotron_qa_8b")
langchain_nvidia_ai_endpoints.ChatNVIDIA
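# Sketch finishing this retrieval-QA-style example: the "context" role message
# above carries the passage; the question is illustrative.
chain = prompt | llm | StrOutputParser()
chain.invoke({"input": "What was signed?"})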
import zipfile import requests def download_and_unzip(url: str, output_path: str = "file.zip") -> None: file_id = url.split("/")[-2] download_url = f"https://drive.google.com/uc?export=download&id={file_id}" response = requests.get(download_url) if response.status_code != 200: print("Failed to download the file.") return with open(output_path, "wb") as file: file.write(response.content) print(f"File {output_path} downloaded.") with zipfile.ZipFile(output_path, "r") as zip_ref: zip_ref.extractall() print(f"File {output_path} has been unzipped.") url = ( "https://drive.google.com/file/d/1rh1s1o2i7B-Sk1v9o8KNgivLVGwJ-osV/view?usp=sharing" ) download_and_unzip(url) directory_path = "./hogwarts" from langchain_community.chat_loaders.facebook_messenger import ( FolderFacebookMessengerChatLoader, SingleFileFacebookMessengerChatLoader, ) loader = SingleFileFacebookMessengerChatLoader( path="./hogwarts/inbox/HermioneGranger/messages_Hermione_Granger.json", ) chat_session = loader.load()[0] chat_session["messages"][:3] loader = FolderFacebookMessengerChatLoader( path="./hogwarts", ) chat_sessions = loader.load() len(chat_sessions) from langchain_community.chat_loaders.utils import ( map_ai_messages, merge_chat_runs, ) merged_sessions = merge_chat_runs(chat_sessions) alternating_sessions = list(map_ai_messages(merged_sessions, "Harry Potter")) alternating_sessions[0]["messages"][:3] from langchain.adapters.openai import convert_messages_for_finetuning training_data = convert_messages_for_finetuning(alternating_sessions) print(f"Prepared {len(training_data)} dialogues for training") training_data[0][:3] chunk_size = 8 overlap = 2 training_examples = [ conversation_messages[i : i + chunk_size] for conversation_messages in training_data for i in range(0, len(conversation_messages) - chunk_size + 1, chunk_size - overlap) ] len(training_examples) get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') import json import time from io import BytesIO import openai my_file = BytesIO() for m in training_examples: my_file.write((json.dumps({"messages": m}) + "\n").encode("utf-8")) my_file.seek(0) training_file = openai.files.create(file=my_file, purpose="fine-tune") status = openai.files.retrieve(training_file.id).status start_time = time.time() while status != "processed": print(f"Status=[{status}]... {time.time() - start_time:.2f}s", end="\r", flush=True) time.sleep(5) status = openai.files.retrieve(training_file.id).status print(f"File {training_file.id} ready after {time.time() - start_time:.2f} seconds.") job = openai.fine_tuning.jobs.create( training_file=training_file.id, model="gpt-3.5-turbo", ) status = openai.fine_tuning.jobs.retrieve(job.id).status start_time = time.time() while status != "succeeded": print(f"Status=[{status}]... {time.time() - start_time:.2f}s", end="\r", flush=True) time.sleep(5) job = openai.fine_tuning.jobs.retrieve(job.id) status = job.status print(job.fine_tuned_model) from langchain_openai import ChatOpenAI model = ChatOpenAI( model=job.fine_tuned_model, temperature=1, ) from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate prompt = ChatPromptTemplate.from_messages( [ ("human", "{input}"), ] ) chain = prompt | model |
StrOutputParser()
langchain_core.output_parsers.StrOutputParser
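# Usage sketch: stream a reply from the fine-tuned persona; the input is
# illustrative.
for tok in chain.stream({"input": "What classes are you taking?"}):
    print(tok, end="")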
from langchain.chains import ConversationalRetrievalChain from langchain.chains.query_constructor.base import AttributeInfo from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain_community.document_loaders import TextLoader from langchain_community.embeddings import FakeEmbeddings from langchain_community.vectorstores import Vectara from langchain_core.documents import Document from langchain_openai import OpenAI from langchain_text_splitters import CharacterTextSplitter docs = [ Document( page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}, ), Document( page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2}, ), Document( page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6}, ), Document( page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3}, ), Document( page_content="Toys come alive and have a blast doing so", metadata={"year": 1995, "genre": "animated"}, ), Document( page_content="Three men walk into the Zone, three men walk out of the Zone", metadata={ "year": 1979, "rating": 9.9, "director": "Andrei Tarkovsky", "genre": "science fiction", }, ), ] vectara = Vectara() for doc in docs: vectara.add_texts( [doc.page_content], embedding=FakeEmbeddings(size=768), doc_metadata=doc.metadata, ) from langchain.chains.query_constructor.base import AttributeInfo from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain_openai import OpenAI metadata_field_info = [ AttributeInfo( name="genre", description="The genre of the movie", type="string or list[string]", ), AttributeInfo( name="year", description="The year the movie was released", type="integer", ), AttributeInfo( name="director", description="The name of the movie director", type="string", ), AttributeInfo( name="rating", description="A 1-10 rating for the movie", type="float" ), ] document_content_description = "Brief summary of a movie" llm =
OpenAI(temperature=0)
langchain_openai.OpenAI
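# Sketch of the retriever this sets up: the LLM translates natural-language
# filters into Vectara metadata queries.
retriever = SelfQueryRetriever.from_llm(
    llm, vectara, document_content_description, metadata_field_info, verbose=True
)
retriever.get_relevant_documents("What are movies about scientists")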
from langchain.evaluation import RegexMatchStringEvaluator evaluator = RegexMatchStringEvaluator() from langchain.evaluation import load_evaluator evaluator = load_evaluator("regex_match") evaluator.evaluate_strings( prediction="The delivery will be made on 2024-01-05", reference=".*\\b\\d{4}-\\d{2}-\\d{2}\\b.*", ) evaluator.evaluate_strings( prediction="The delivery will be made on 2024-01-05", reference=".*\\b\\d{2}-\\d{2}-\\d{4}\\b.*", ) evaluator.evaluate_strings( prediction="The delivery will be made on 01-05-2024", reference=".*\\b\\d{2}-\\d{2}-\\d{4}\\b.*", ) evaluator.evaluate_strings( prediction="The delivery will be made on 01-05-2024", reference="|".join( [".*\\b\\d{4}-\\d{2}-\\d{2}\\b.*", ".*\\b\\d{2}-\\d{2}-\\d{4}\\b.*"] ), ) import re evaluator =
RegexMatchStringEvaluator(flags=re.IGNORECASE)
langchain.evaluation.RegexMatchStringEvaluator
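# Usage sketch for the case-insensitive evaluator:
evaluator.evaluate_strings(
    prediction="I LOVE testing",
    reference="love",
)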
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core langchain langchain-openai') from langchain.utils.math import cosine_similarity from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import PromptTemplate from langchain_core.runnables import RunnableLambda, RunnablePassthrough from langchain_openai import ChatOpenAI, OpenAIEmbeddings physics_template = """You are a very smart physics professor. \ You are great at answering questions about physics in a concise and easy to understand manner. \ When you don't know the answer to a question you admit that you don't know. Here is a question: {query}""" math_template = """You are a very good mathematician. You are great at answering math questions. \ You are so good because you are able to break down hard problems into their component parts, \ answer the component parts, and then put them together to answer the broader question. Here is a question: {query}""" embeddings = OpenAIEmbeddings() prompt_templates = [physics_template, math_template] prompt_embeddings = embeddings.embed_documents(prompt_templates) def prompt_router(input): query_embedding = embeddings.embed_query(input["query"]) similarity = cosine_similarity([query_embedding], prompt_embeddings)[0] most_similar = prompt_templates[similarity.argmax()] print("Using MATH" if most_similar == math_template else "Using PHYSICS") return
PromptTemplate.from_template(most_similar)
langchain_core.prompts.PromptTemplate.from_template
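# Sketch of the full semantic router built from these pieces: embed the query,
# pick the closest prompt, then answer with it.
chain = (
    {"query": RunnablePassthrough()}
    | RunnableLambda(prompt_router)
    | ChatOpenAI()
    | StrOutputParser()
)
print(chain.invoke("What's a black hole"))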
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-alloydb-pg langchain-google-vertexai') from google.colab import auth auth.authenticate_user() PROJECT_ID = "my-project-id" # @param {type:"string"} get_ipython().system('gcloud config set project {PROJECT_ID}') get_ipython().system('gcloud services enable alloydb.googleapis.com') REGION = "us-central1" # @param {type: "string"} CLUSTER = "my-cluster" # @param {type: "string"} INSTANCE = "my-primary" # @param {type: "string"} DATABASE = "my-database" # @param {type: "string"} TABLE_NAME = "vector_store" # @param {type: "string"} from langchain_google_alloydb_pg import AlloyDBEngine engine = await AlloyDBEngine.afrom_instance( project_id=PROJECT_ID, region=REGION, cluster=CLUSTER, instance=INSTANCE, database=DATABASE, ) await engine.ainit_vectorstore_table( table_name=TABLE_NAME, vector_size=768, # Vector size for VertexAI model(textembedding-gecko@latest) ) get_ipython().system('gcloud services enable aiplatform.googleapis.com') from langchain_google_vertexai import VertexAIEmbeddings embedding = VertexAIEmbeddings( model_name="textembedding-gecko@latest", project=PROJECT_ID ) from langchain_google_alloydb_pg import AlloyDBVectorStore store = await AlloyDBVectorStore.create( engine=engine, table_name=TABLE_NAME, embedding_service=embedding, ) import uuid all_texts = ["Apples and oranges", "Cars and airplanes", "Pineapple", "Train", "Banana"] metadatas = [{"len": len(t)} for t in all_texts] ids = [str(uuid.uuid4()) for _ in all_texts] await store.aadd_texts(all_texts, metadatas=metadatas, ids=ids) await store.adelete([ids[1]]) query = "I'd like a fruit." docs = await store.asimilarity_search(query) print(docs) query_vector = embedding.embed_query(query) docs = await store.asimilarity_search_by_vector(query_vector, k=2) print(docs) from langchain_google_alloydb_pg.indexes import IVFFlatIndex index = IVFFlatIndex() await store.aapply_vector_index(index) await store.areindex() # Re-index using default index name await store.adrop_vector_index() # Delete index using default name from langchain_google_alloydb_pg import Column TABLE_NAME = "vectorstore_custom" await engine.ainit_vectorstore_table( table_name=TABLE_NAME, vector_size=768, # VertexAI model: textembedding-gecko@latest metadata_columns=[
Column("len", "INTEGER")
langchain_google_alloydb_pg.Column
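# Hedged sketch (async, matching the rest of this snippet; assumes the
# ainit_vectorstore_table(...) call above is closed): create a store over the
# custom table so the "len" column is filterable at search time.
custom_store = await AlloyDBVectorStore.create(
    engine=engine,
    table_name=TABLE_NAME,
    embedding_service=embedding,
    metadata_columns=["len"],
)
docs = await custom_store.asimilarity_search_by_vector(query_vector, filter="len >= 6")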
import requests def download_drive_file(url: str, output_path: str = "chat.db") -> None: file_id = url.split("/")[-2] download_url = f"https://drive.google.com/uc?export=download&id={file_id}" response = requests.get(download_url) if response.status_code != 200: print("Failed to download the file.") return with open(output_path, "wb") as file: file.write(response.content) print(f"File {output_path} downloaded.") url = ( "https://drive.google.com/file/d/1NebNKqTA2NXApCmeH6mu0unJD2tANZzo/view?usp=sharing" ) download_drive_file(url) from langchain_community.chat_loaders.imessage import IMessageChatLoader loader = IMessageChatLoader( path="./chat.db", ) from typing import List from langchain_community.chat_loaders.base import ChatSession from langchain_community.chat_loaders.utils import ( map_ai_messages, merge_chat_runs, ) raw_messages = loader.lazy_load() merged_messages =
merge_chat_runs(raw_messages)
langchain_community.chat_loaders.utils.merge_chat_runs
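# Sketch of the usual next step: attribute one participant's messages to the
# AI side (the sender name depends on the contents of chat.db).
chat_sessions: List[ChatSession] = list(
    map_ai_messages(merged_messages, sender="Tortoise")
)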
from langchain_community.document_loaders import WebBaseLoader from langchain_community.vectorstores import Chroma from langchain_openai import OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/") data = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) splits = text_splitter.split_documents(data) embedding = OpenAIEmbeddings() vectordb = Chroma.from_documents(documents=splits, embedding=embedding) from langchain.retrievers.multi_query import MultiQueryRetriever from langchain_openai import ChatOpenAI question = "What are the approaches to Task Decomposition?" llm = ChatOpenAI(temperature=0) retriever_from_llm = MultiQueryRetriever.from_llm( retriever=vectordb.as_retriever(), llm=llm ) import logging logging.basicConfig() logging.getLogger("langchain.retrievers.multi_query").setLevel(logging.INFO) unique_docs = retriever_from_llm.get_relevant_documents(query=question) len(unique_docs) from typing import List from langchain.chains import LLMChain from langchain.output_parsers import PydanticOutputParser from langchain.prompts import PromptTemplate from pydantic import BaseModel, Field class LineList(BaseModel): lines: List[str] = Field(description="Lines of text") class LineListOutputParser(PydanticOutputParser): def __init__(self) -> None: super().__init__(pydantic_object=LineList) def parse(self, text: str) -> LineList: lines = text.strip().split("\n") return LineList(lines=lines) output_parser = LineListOutputParser() QUERY_PROMPT = PromptTemplate( input_variables=["question"], template="""You are an AI language model assistant. Your task is to generate five different versions of the given user question to retrieve relevant documents from a vector database. By generating multiple perspectives on the user question, your goal is to help the user overcome some of the limitations of the distance-based similarity search. Provide these alternative questions separated by newlines. Original question: {question}""", ) llm = ChatOpenAI(temperature=0) llm_chain =
LLMChain(llm=llm, prompt=QUERY_PROMPT, output_parser=output_parser)
langchain.chains.LLMChain
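# Sketch wiring the custom chain into MultiQueryRetriever; parser_key="lines"
# points the retriever at the parsed LineList attribute.
retriever = MultiQueryRetriever(
    retriever=vectordb.as_retriever(), llm_chain=llm_chain, parser_key="lines"
)
unique_docs = retriever.get_relevant_documents(
    query="What does the course say about regression?"
)
len(unique_docs)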
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark chromadb') from langchain_community.vectorstores import Chroma from langchain_core.documents import Document from langchain_openai import OpenAIEmbeddings docs = [ Document( page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose", metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}, ), Document( page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...", metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2}, ), Document( page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea", metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6}, ), Document( page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them", metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3}, ), Document( page_content="Toys come alive and have a blast doing so", metadata={"year": 1995, "genre": "animated"}, ), Document( page_content="Three men walk into the Zone, three men walk out of the Zone", metadata={ "year": 1979, "director": "Andrei Tarkovsky", "genre": "thriller", "rating": 9.9, }, ), ] vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings()) from langchain.chains.query_constructor.base import AttributeInfo from langchain.retrievers.self_query.base import SelfQueryRetriever from langchain_openai import ChatOpenAI metadata_field_info = [ AttributeInfo( name="genre", description="The genre of the movie. One of ['science fiction', 'comedy', 'drama', 'thriller', 'romance', 'action', 'animated']", type="string", ), AttributeInfo( name="year", description="The year the movie was released", type="integer", ), AttributeInfo( name="director", description="The name of the movie director", type="string", ), AttributeInfo( name="rating", description="A 1-10 rating for the movie", type="float" ), ] document_content_description = "Brief summary of a movie" llm =
ChatOpenAI(temperature=0)
langchain_openai.ChatOpenAI
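# Sketch completing the self-query setup and exercising a metadata filter:
retriever = SelfQueryRetriever.from_llm(
    llm, vectorstore, document_content_description, metadata_field_info
)
retriever.get_relevant_documents("I want to watch a movie rated higher than 8.5")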
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain') get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai') get_ipython().run_line_magic('pip', 'install --upgrade --quiet psycopg2-binary') get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken') YBUSER = "[SANDBOX USER]" YBPASSWORD = "[SANDBOX PASSWORD]" YBDATABASE = "[SANDBOX_DATABASE]" YBHOST = "trialsandbox.sandbox.aws.yellowbrickcloud.com" OPENAI_API_KEY = "[OPENAI API KEY]" import os import pathlib import re import sys import urllib.parse as urlparse from getpass import getpass import psycopg2 from IPython.display import Markdown, display from langchain.chains import LLMChain, RetrievalQAWithSourcesChain from langchain.docstore.document import Document from langchain_community.vectorstores import Yellowbrick from langchain_openai import ChatOpenAI, OpenAIEmbeddings from langchain_text_splitters import RecursiveCharacterTextSplitter yellowbrick_connection_string = ( f"postgres://{urlparse.quote(YBUSER)}:{YBPASSWORD}@{YBHOST}:5432/{YBDATABASE}" ) YB_DOC_DATABASE = "sample_data" YB_DOC_TABLE = "yellowbrick_documentation" embedding_table = "my_embeddings" os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY from langchain.prompts.chat import ( ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) system_template = """If you don't know the answer, make up your best guess.""" messages = [ SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}")
langchain.prompts.chat.HumanMessagePromptTemplate.from_template
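# Hedged sketch of how the chat prompt is typically finished and wired up here
# (assumes the messages list above is closed with its trailing bracket):
prompt = ChatPromptTemplate.from_messages(messages)
chain = LLMChain(
    llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
    prompt=prompt,
)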
from typing import List from langchain.prompts.chat import ( HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain.schema import ( AIMessage, BaseMessage, HumanMessage, SystemMessage, ) from langchain_openai import ChatOpenAI class CAMELAgent: def __init__( self, system_message: SystemMessage, model: ChatOpenAI, ) -> None: self.system_message = system_message self.model = model self.init_messages() def reset(self) -> None: self.init_messages() return self.stored_messages def init_messages(self) -> None: self.stored_messages = [self.system_message] def update_messages(self, message: BaseMessage) -> List[BaseMessage]: self.stored_messages.append(message) return self.stored_messages def step( self, input_message: HumanMessage, ) -> AIMessage: messages = self.update_messages(input_message) output_message = self.model(messages) self.update_messages(output_message) return output_message import os os.environ["OPENAI_API_KEY"] = "" assistant_role_name = "Python Programmer" user_role_name = "Stock Trader" task = "Develop a trading bot for the stock market" word_limit = 50 # word limit for task brainstorming task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.") task_specifier_prompt = """Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}. Please make it more specific. Be creative and imaginative. Please reply with the specified task in {word_limit} words or less. Do not add anything else.""" task_specifier_template = HumanMessagePromptTemplate.from_template( template=task_specifier_prompt ) task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0)) task_specifier_msg = task_specifier_template.format_messages( assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task, word_limit=word_limit, )[0] specified_task_msg = task_specify_agent.step(task_specifier_msg) print(f"Specified task: {specified_task_msg.content}") specified_task = specified_task_msg.content assistant_inception_prompt = """Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me! We share a common interest in collaborating to successfully complete a task. You must help me to complete the task. Here is the task: {task}. Never forget our task! I must instruct you based on your expertise and my needs to complete the task. I must give you one instruction at a time. You must write a specific solution that appropriately completes the requested instruction. You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons. Do not add anything else other than your solution to my instruction. You are never supposed to ask me any questions you only answer questions. You are never supposed to reply with a flake solution. Explain your solutions. Your solution must be declarative sentences and simple present tense. Unless I say the task is completed, you should always start with: Solution: <YOUR_SOLUTION> <YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving. Always end <YOUR_SOLUTION> with: Next request.""" user_inception_prompt = """Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me. We share a common interest in collaborating to successfully complete a task. I must help you to complete the task. Here is the task: {task}. Never forget our task! 
You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways: 1. Instruct with a necessary input: Instruction: <YOUR_INSTRUCTION> Input: <YOUR_INPUT> 2. Instruct without any input: Instruction: <YOUR_INSTRUCTION> Input: None The "Instruction" describes a task or question. The paired "Input" provides further context or information for the requested "Instruction". You must give me one instruction at a time. I must write a response that appropriately completes the requested instruction. I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons. You should instruct me not ask me questions. Now you must start to instruct me using the two ways described above. Do not add anything else other than your instruction and the optional corresponding input! Keep giving me instructions and necessary inputs until you think the task is completed. When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>. Never say <CAMEL_TASK_DONE> unless my responses have solved your task.""" def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str): assistant_sys_template = SystemMessagePromptTemplate.from_template( template=assistant_inception_prompt ) assistant_sys_msg = assistant_sys_template.format_messages( assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task, )[0] user_sys_template = SystemMessagePromptTemplate.from_template( template=user_inception_prompt ) user_sys_msg = user_sys_template.format_messages( assistant_role_name=assistant_role_name, user_role_name=user_role_name, task=task, )[0] return assistant_sys_msg, user_sys_msg assistant_sys_msg, user_sys_msg = get_sys_msgs( assistant_role_name, user_role_name, specified_task ) assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2)) user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2)) assistant_agent.reset() user_agent.reset() user_msg = HumanMessage( content=( f"{user_sys_msg.content}. " "Now start to give me introductions one by one. " "Only reply with Instruction and Input." ) ) assistant_msg = HumanMessage(content=f"{assistant_sys_msg.content}") assistant_msg = assistant_agent.step(user_msg) print(f"Original task prompt:\n{task}\n") print(f"Specified task prompt:\n{specified_task}\n") chat_turn_limit, n = 30, 0 while n < chat_turn_limit: n += 1 user_ai_msg = user_agent.step(assistant_msg) user_msg =
HumanMessage(content=user_ai_msg.content)
langchain.schema.HumanMessage
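# Hedged sketch of the remainder of the chat loop (continues the while block
# above): relay turns between the two agents and stop on the completion token.
    print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
    assistant_ai_msg = assistant_agent.step(user_msg)
    assistant_msg = HumanMessage(content=assistant_ai_msg.content)
    print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
    if "<CAMEL_TASK_DONE>" in user_msg.content:
        break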
import os os.environ["GOLDEN_API_KEY"] = "" from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper golden_query =
GoldenQueryAPIWrapper()
langchain_community.utilities.golden_query.GoldenQueryAPIWrapper
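# Usage sketch: results come back as JSON strings, so parse before use; the
# query is illustrative.
import json

json.loads(golden_query.run("companies in nanotech"))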
get_ipython().run_line_magic('pip', 'install --upgrade --quiet atlassian-python-api') import os from langchain.agents import AgentType, initialize_agent from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit from langchain_community.utilities.jira import JiraAPIWrapper from langchain_openai import OpenAI os.environ["JIRA_API_TOKEN"] = "abc" os.environ["JIRA_USERNAME"] = "123" os.environ["JIRA_INSTANCE_URL"] = "https://jira.atlassian.com" os.environ["OPENAI_API_KEY"] = "xyz" llm =
OpenAI(temperature=0)
langchain_openai.OpenAI
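# Sketch completing the toolkit setup and exercising the agent; the project
# key "PW" is illustrative.
jira = JiraAPIWrapper()
toolkit = JiraToolkit.from_jira_api_wrapper(jira)
agent = initialize_agent(
    toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("make a new issue in project PW to remind me to make more fried rice")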