
Plain Text
Enter Query: do you offer Unity3D?
INFO:llama_index.token_counter.token_counter:> [retrieve] Total LLM token usage: 0 tokens
INFO:llama_index.token_counter.token_counter:> [retrieve] Total embedding token usage: 7 tokens
INFO:llama_index.token_counter.token_counter:> [get_response] Total LLM token usage: 1803 tokens
INFO:llama_index.token_counter.token_counter:> [get_response] Total embedding token usage: 0 tokens
INFO:llama_index.token_counter.token_counter:> [get_response] Total LLM token usage: 1803 tokens
INFO:llama_index.token_counter.token_counter:> [get_response] Total embedding token usage: 0 tokens
Output: Yes, they offer resources skilled in Unity3D for game development.
How do I get rid of the INFO lines?
oh boy, this looks like an old version of llamaindex lol

Try this maybe?

Plain Text
import logging

# remove all handlers attached to the root logger so the INFO lines stop printing
logging.getLogger().handlers.clear()
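If clearing handlers doesn't do it, raising the log level should also silence them (a small sketch, assuming the INFO comes from the usual tutorial-style logging setup):

Plain Text
import logging

# drop everything below WARNING on the root logger,
# or target the llama_index logger specifically
logging.getLogger().setLevel(logging.WARNING)
logging.getLogger("llama_index").setLevel(logging.WARNING)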
Thanks a lot. I was confused by the latest llama_index version, so I rolled back to a prior version.
Would you be able to help me navigate around the newer version?
Of course! What was confusing you with the newer version?
Plain Text
from langchain import OpenAI
from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex, GPTVectorStoreIndex, PromptHelper, StorageContext, load_index_from_storage
from llama_index import LLMPredictor, ServiceContext

import os
from dotenv import dotenv_values


os.environ['OPENAI_API_KEY'] = dotenv_values()['OPENAI_API_KEY']


def construct_index(directory_path):
    # set maximum input size
    max_input_size = 4096
    # set number of output tokens
    num_outputs = 256
    # set maximum chunk overlap
    max_chunk_overlap = 0.2
    # set chunk size limit
    chunk_size_limit = 600
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
    # define LLM
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.2, model_name="gpt-3.5-turbo", max_tokens=num_outputs))
    documents = SimpleDirectoryReader(directory_path).load_data()
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
    index.storage_context.persist('index.json')
    return index

def ask_bot(query, input_index='index.json'):
    storage_context = StorageContext.from_defaults(persist_dir=input_index)
    index = load_index_from_storage(storage_context)
    query_engine = index.as_query_engine(response_mode='compact')
    response = query_engine.query(query)
    response = str(response)
    print("Output: " + response)
    return response

# construct_index('knowledge/')
This is the current code I am working with. I was quite puzzled by save_to_disk and load_from_disk, and some docs and GitHub issues indicated using StorageContext instead. What can be modified to leverage the newer version of llama-index?
Here's an updated version of the code

Plain Text
from llama_index import SimpleDirectoryReader, ListIndex, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index import ServiceContext
from llama_index.llms import OpenAI

import os
from dotenv import dotenv_values


os.environ['OPENAI_API_KEY'] = dotenv_values()['OPENAI_API_KEY']

def get_service_context():
    chunk_size = 600
    num_outputs = 256
    context_window = 4096

    # define LLM
    llm = OpenAI(temperature=0.2, model="gpt-3.5-turbo", max_tokens=num_outputs)
    
    service_context = ServiceContext.from_defaults(
      llm=llm,
      chunk_size=chunk_size,
      context_window=context_window,
    ) 
    
    return service_context

def construct_index(directory_path):   
    service_context = get_service_context()    

    documents = SimpleDirectoryReader(directory_path).load_data()
    index = VectorStoreIndex.from_documents(documents, service_context=service_context)
    index.storage_context.persist(persist_dir='./storage')
    return index

def ask_bot(query, input_index_dir='./storage'):
    service_context = get_service_context()
    storage_context = StorageContext.from_defaults(persist_dir=input_index_dir)

    index = load_index_from_storage(storage_context, service_context=service_context)
    query_engine = index.as_query_engine(
        response_mode='compact',
        similarity_top_k=3,  # bump up the top k from 2 due to smaller chunk size?
    )
    response = query_engine.query(query)
    response = str(response)
    print("Output: " + response)
    return response

# construct_index('knowledge/')
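Usage would look roughly like this (assuming your documents still live in knowledge/ as in your original snippet):

Plain Text
# build and persist the index once
construct_index('knowledge/')

# then query against the persisted index
ask_bot('do you offer Unity3D?')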
thank you so much!
What version should I upgrade to?
Likely the latest! 0.8.9
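e.g. (assuming you install with pip):

Plain Text
pip install -U llama-index==0.8.9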
thanks a lot!
Hey, it is raising an authentication error despite having a working API key
Plain Text
File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/util.py", line 186, in default_api_key
    raise openai.error.AuthenticationError(
openai.error.AuthenticationError: No API key provided. You can set your API key in code using 'openai.api_key = <API-KEY>', or you can set the environment variable OPENAI_API_KEY=<API-KEY>). If your API key is stored in a file, you can point the openai module at it with 'openai.api_key_path = <PATH>'. You can generate API keys in the OpenAI web interface. See https://platform.openai.com/account/api-keys for details.
I've seen this sometimes in threaded apps, or in notebooks.

In addition to os.environ, try setting the key on the module?

openai.api_key = "sk-..."
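A minimal sketch of what that could look like in your own module (assuming the same dotenv setup as your code above):

Plain Text
import openai
from dotenv import dotenv_values

# set the key on the openai module directly, in addition to the env var
openai.api_key = dotenv_values()['OPENAI_API_KEY']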
within the llama index module or my module?
I also assigned the api key in the llm OpenAI function
okay within my module it worked! haha
Thanks a lot man
I will be bothering you further with my queries