# Use the official Python image for Python 3.11
FROM python:3.11

# Set the working directory in the container
WORKDIR /app

# Copy the current directory contents into the container at /app
COPY . /app

# ARG FORCE_CMAKE=1
# ARG CMAKE_ARGS="-DLLAMA_CUBLAS=on"

# Install project dependencies
RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" python -m pip install -r requirements.txt

# Command to run the server
CMD ["python", "./server.py"]
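For reference, a minimal sketch of how this image might be built and run. The image tag llm-server and the 8000:8000 port mapping are assumptions for illustration, not values taken from the Dockerfile or server.py:

# Build the image from the directory containing the Dockerfile (tag is a placeholder)
docker build -t llm-server .

# Run the container; adjust the port mapping to whatever server.py actually listens on
docker run --rm -p 8000:8000 llm-server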
import weaviate
# NOTE: these import paths assume the pre-0.10 llama_index package layout and the
# langchain-provided HuggingFaceEmbeddings, matching the APIs used below.
from langchain.embeddings import HuggingFaceEmbeddings
from llama_index import ServiceContext, SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.node_parser import SimpleNodeParser
from llama_index.vector_stores import WeaviateVectorStore

import my_llm  # local module that returns the configured LLM

client = weaviate.Client(
    url="http://localhost:8080",  # Replace with your endpoint
)

class_obj = {
    "class": "testing",
    "vectorizer": "none",  # If set to "none" you must always provide vectors yourself. Could be any other "text2vec-*" also.
}
# client.schema.create_class(class_obj)

llm = my_llm.get_llm()
embed_model = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-mpnet-base-v2"
)

service_context = ServiceContext.from_defaults(
    llm=llm,
    embed_model=embed_model,
)

# load the source document in using the reader
doc = SimpleDirectoryReader(input_files=['./docs/Control.csv']).load_data()

# chunk the document into nodes
parser = SimpleNodeParser.from_defaults(chunk_size=1024, chunk_overlap=20)
nodes = parser.get_nodes_from_documents(doc)
print("-----LOGGING----- generated nodes")

# construct the vector store; text_key must be a valid Weaviate property name
# (the original "Control.csv" contains a dot, which Weaviate does not accept)
vector_store = WeaviateVectorStore(
    weaviate_client=client,
    index_name="Control",
    text_key="text",
)
print("-----LOGGING----- generated vector_store")

# setting up the storage for the embeddings
storage_context = StorageContext.from_defaults(vector_store=vector_store)
print("-----LOGGING----- generated storage_context")

# set up the index; pass service_context here so the custom LLM and embeddings are actually used
index = VectorStoreIndex(nodes, storage_context=storage_context, service_context=service_context)
print("-----LOGGING----- generated index")

query_engine = index.as_query_engine()
response = query_engine.query("What is control?")
print(response)
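As an optional sanity check after indexing, you can ask Weaviate how many objects landed in the "Control" class. This is a sketch assuming the same weaviate-client v3 API used above:

# Hypothetical sanity check: count objects stored in the "Control" class after indexing
result = client.query.aggregate("Control").with_meta_count().do()
print(result)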