
python - Trying to deploy my first Modal app with a Chroma database but the data is not being used. Need help debugging retrieval


I am having trouble figuring out why I can't see the print statements from my retrieveInfoForQuery function in the terminal, and I'm trying to work out what is wrong. I have verified that the Chroma DB is on the volume.
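For reference, this is roughly how I verified the files are there (a minimal standalone sketch; the throwaway app name volume-check is just for illustration, the volume name matches the one in my app):

import modal
import os

app = modal.App("volume-check")  # throwaway app, only used for checking
vol = modal.Volume.from_name("gotquestions-storage", create_if_missing=True)

@app.function(volumes={"/vectorstore": vol})
def list_volume():
    # Walk the mount point and print every file, so the persisted
    # Chroma files (chroma.sqlite3, index directories) show up.
    for root, _dirs, files in os.walk("/vectorstore"):
        for name in files:
            print(os.path.join(root, name))

@app.local_entrypoint()
def main():
    list_volume.remote()

Here is the full app code: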

from langchain_core.tools import tool
from langchain_core.messages import SystemMessage
from langchain import hub
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import CSVLoader
from langgraph.graph import MessagesState, StateGraph
from langchain_chroma import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough, RunnableMap
from langchain_core.documents import Document
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langchain_community.llms import HuggingFaceHub
from typing_extensions import List, TypedDict
from langchain.chat_models import init_chat_model
from langchain_openai import OpenAIEmbeddings
import sys
import modal
import os


# Create an image with dependencies
image = modal.Image.debian_slim().pip_install(
    "openai", "langchain", "langchain_community", "langchain_core",
    "langchain_huggingface", "langchain_openai", "langgraph", "langchain_chroma"
)

# Create Modal app
app = modal.App("rag-modal-deployment", image=image)

# Persistent storage
vectorstore_volume = modal.Volume.from_name("gotquestions-storage", create_if_missing=True)

# Graph state for the RAG pipeline
class State(MessagesState):
    context: List[Document]


@app.function(volumes={"/vectorstore": vectorstore_volume}, secrets=[modal.Secret.from_name("openai-secret"), modal.Secret.from_name("langsmith-secret")], timeout=6000)
def loadData(forceUpload):
    # Load or create vectorstore
    vectorstore_path = "/vectorstore"
    if forceUpload == "true":
        print("Created new vector store.")

        # Load CSV
        loader = CSVLoader(
            file_path="/vectorstore/gotquestions.csv",
            encoding="utf8",
            csv_args={'delimiter': ',', 'quotechar': '"'},
            metadata_columns=["url", "question"],
        )
        docs = loader.load()

        # Split Documents
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=500)
        splits = text_splitter.split_documents(docs)

        # Create Vector Store
        vectorstore = Chroma.from_documents(
            documents=splits, 
            embedding=OpenAIEmbeddings(model="text-embedding-3-large"),
            persist_directory=vectorstore_path
        )
    else:
        print("Loaded existing vector store.")
        vectorstore = Chroma(persist_directory=vectorstore_path, embedding_function=OpenAIEmbeddings(model="text-embedding-3-large"))
    print("done")
    

    return vectorstore

        
@app.function(secrets=[modal.Secret.from_name("openai-secret"), modal.Secret.from_name("langsmith-secret")], volumes={"/vectorstore": vectorstore_volume}, timeout=6000)
@modal.fastapi_endpoint(docs=True)
def getDataAndAnswerQuestion(question: str, forceUpload: str):
    

    # Set environment variables
    #os.environ["OPENAI_API_KEY"] = modal.Secret().get("OPENAI_API_KEY")
    #os.environ["HUGGINGFACEHUB_API_TOKEN"] = modal.Secret().get("HUGGINGFACEHUB_API_TOKEN")

    # Load data
    #loadData.remote(forceUpload)
    graph_builder = StateGraph(State)
    from langgraph.graph import END
    from langgraph.prebuilt import ToolNode, tools_condition

    graph_builder.add_node(query_or_respond)
    graph_builder.add_node(generate)
    graph_builder.set_entry_point("query_or_respond")
    graph_builder.add_edge("query_or_respond", "generate")
    graph_builder.add_edge("generate", END)

    graph = graph_builder.compile()
    finalAnswer = graph.invoke({"messages": [{"role": "user", "content": question}], "context": ""})
    #for step in graph.stream({"messages": [{"role": "user", "content": question}], "context": ""},stream_mode="values"):
        #step["messages"][-1].pretty_print()
    # Return formatted results
    sources_html = "".join(f'<a href="{doc.metadata["url"]}">{doc.metadata["question"]}</a><br>' for doc in finalAnswer["context"])

    return {"content": finalAnswer["messages"][-1].content, "sources": sources_html}


@tool(response_format="content_and_artifact")
def retrieveInfoForQuery(query: str):
    """Retrieve information related to a query."""
    print("retrieving... " + query)
    vectorstore = loadData.remote("false")
    if isinstance(vectorstore, Chroma):  # Ensure it's properly loaded
        retrieved_docs = vectorstore.similarity_search(query, k=2)
    else:
        raise ValueError("Vectorstore did not initialize correctly.")


    #print("retrieved... "+str(retrieved_docs))
    
    serialized = "\n\n".join(
        (f"Source: {doc.metadata}\n" f"Content: {doc.page_content}")
        for doc in retrieved_docs
    )
    return serialized, retrieved_docs

def query_or_respond(state: MessagesState):
    """Generate tool call for retrieval or respond."""
    llm = init_chat_model("gpt-4o", model_provider="openai")
    llm_with_tools = llm.bind_tools([retrieveInfoForQuery])
    response = llm_with_tools.invoke(state["messages"])
    
    return {"messages": [response]}

def generate(state: State):
    """Generate answer."""
    # Collect the tool messages in order
    tool_messages = [
        message for message in state["messages"]
        if message.type == "tool"
    ]

    docs_content = "\n\n".join(message.content for message in tool_messages)
    system_message_content = (
        "You are an assistant for question-answering tasks. "
        "Use the following pieces of retrieved context to answer "
        "the question. If you don't know the answer, say that you "
        "don't know. Keep the answer concise. Only use data from the tool."
        "\n\n"
        f"{docs_content}"
    )

    conversation_messages = [
        message for message in state["messages"]
        if message.type in ("human", "system") or (message.type == "ai" and not message.tool_calls)
    ]

    prompt = [SystemMessage(system_message_content)] + conversation_messages
    llm = init_chat_model("gpt-4o", model_provider="openai")
    response = llm.invoke(prompt)

    context = []
    for tool_message in tool_messages:
        context.extend(tool_message.artifact)

    return {"messages": [response], "context": context}


@app.local_entrypoint()
def main():
    #retrieveInfoForQuery("who was Jesus")
    vector = loadData.remote("true")
    print(type(vector))
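
For context, this is how I'm calling the deployed endpoint (a sketch; the URL is a placeholder for the real one that modal deploy prints):

import requests

# Placeholder URL; substitute the endpoint URL printed by `modal deploy`
url = "https://<workspace>--rag-modal-deployment-getdataandanswerquestion.modal.run"
resp = requests.get(url, params={"question": "who was Jesus", "forceUpload": "false"})
print(resp.json())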
    

Thanks for any help you can provide.
