# RAG over OCR'd image text: extract the text from a vehicle photo, index it in
# Chroma, and ask a local Ollama model to identify the plate from the retrieved
# context.
from langchain_community.llms import Ollama
from langchain_community.embeddings import OllamaEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.document_loaders.image import UnstructuredImageLoader
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.vectorstores import Chroma
from langchain.chains import create_retrieval_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter

MODEL = "llava"

prompt = ChatPromptTemplate.from_template(
    """
    Answer only based on the following provided context. If you know the answer
    but it is not based on the provided context, do not provide it; just state
    that the answer is not in the context provided:

    <context>
    {context}
    </context>

    User: {input}
    """
)

llm = Ollama(model=MODEL)

# OCR the image into LangChain documents (uses the `unstructured` package's
# image support).
loader = UnstructuredImageLoader("img/plate_2.jpeg")
data = loader.load()

# Split the extracted text into overlapping chunks for embedding.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=100)
chunks = text_splitter.split_documents(data)

# Embed the chunks (not the unsplit documents) and index them in Chroma.
vector = Chroma.from_documents(chunks, OllamaEmbeddings())
retriever = vector.as_retriever()

# Stuff the retrieved documents into the prompt and parse the output as a string.
output_parser = StrOutputParser()
document_chain = create_stuff_documents_chain(llm, prompt, output_parser=output_parser)
retrieval_chain = create_retrieval_chain(retriever, document_chain)

response = retrieval_chain.invoke({
    "input": """
    You are looking at the image of a vehicle; try to identify the vehicle's
    license plate.
    """
})
print(response["answer"])
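
# The answer can only be as good as the OCR text indexed above. As a quick
# sanity check (a sketch, not part of the pipeline itself), print what the
# loader actually extracted from the image before trusting the model's reply:
for doc in data:
    print(doc.page_content)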