```python
%pip -q install langchain==0.0.148 openai
%pip -q install llama_index==0.5.6

from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper, ServiceContext
from langchain import OpenAI
import sys
import os
from IPython.display import Markdown, display

def construct_index(directory_path):
    # set maximum input size
    max_input_size = 4096
    # set number of output tokens
    num_outputs = 30000
    # set maximum chunk overlap
    max_chunk_overlap = 60
    # set chunk size limit
    chunk_size_limit = 6024 

    # define prompt helper
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

    # define LLM
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo-0613", max_tokens=num_outputs))
 
    documents = SimpleDirectoryReader(directory_path).load_data()
    
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)

    index.save_to_disk('index.json')

    return index

def ask_ai():
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    while True: 
        query = input("What do you want to ask? ")
        response = index.query(query)
        display(Markdown(f"Response: <b>{response.response}</b>"))
construct_index("data/")
ask_ai()
```
Hey guys, I can't increase my response length. What am I doing wrong? As you can see, I increased the outputs and the chunk sizes and everything, but no matter what I do the output doesn't get any longer.
blast from the past with those library versions lol

```python
index = GPTSimpleVectorIndex.load_from_disk('index.json')
```

You should add the service context here (without it, the settings go back to defaults)

```python
index = GPTSimpleVectorIndex.load_from_disk('index.json', service_context=service_context)
```

```python
from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext, load_index_from_storage
from langchain import OpenAI
import sys
import os
from IPython.display import Markdown, display

def construct_index(directory_path):
    # set maximum input size
    max_input_size = 4096
    # set number of output tokens
    num_outputs = 10000
    # set maximum chunk overlap
    max_chunk_overlap = 20
    # set chunk size limit
    chunk_size_limit = 600 

    # define prompt helper
    # prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

    # define LLM
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo-0613", max_tokens=num_outputs))
 
    documents = SimpleDirectoryReader(directory_path).load_data()
    

    # storage_context = StorageContext.from_defaults()
    # storage_context.persist(persist_dir="/Users/og/Downloads/RozGPT-main")

    # service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    service_context = ServiceContext.from_defaults(
            llm_predictor=llm_predictor,
            chunk_size=1024
        )
    index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)

    storage_context = StorageContext.from_defaults()
    storage_context.persist(persist_dir="/Users/og/Downloads/RozGPT-main")
    storage_context = StorageContext.from_defaults(persist_dir="")

    # index.save_to_disk('index.json')

    return index

def ask_ai():
    index = load_index_from_storage(storage_context,service_context=service_context)
    # index = load_index_from_storage(storage_context,service_context=service_context)
    while True: 
        query = input("What do you want to ask? ")
        response = index.query(query)
        display(Markdown(f"Response: <b>{response.response}</b>"))
```
I tried this
This is the new code, but I keep getting errors
Also, I get "service_context isn't defined" if I add it to that old code
```
Cell In[23], line 34, in ask_ai()
     32 def ask_ai():
     33     # index = GPTSimpleVectorIndex.load_from_disk('index.json')
---> 34     index = GPTSimpleVectorIndex.load_from_disk('index.json', service_context=service_context) 
     35     while True: 
     36         query = input("What do you want to ask? ")

NameError: name 'service_context' is not defined
```
Right, you'll also need to define that (sorry, should have clarified that lol)
Also, if you are using gpt-3.5, you'll want to use ChatOpenAI instead of OpenAI (the turbo models are chat models, and langchain's OpenAI wrapper targets the older completions endpoint):

```python
from langchain.chat_models import ChatOpenAI
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo-0613", max_tokens=num_outputs))
```
Ok I swapped it out
```python
def construct_index(directory_path):
    # set maximum input size
    max_input_size = 4096
    # set number of output tokens
    num_outputs = 30000
    # set maximum chunk overlap
    max_chunk_overlap = 60
    # set chunk size limit
    chunk_size_limit = 6024 

    # define prompt helper
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

    # define LLM
    # llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo-0613", max_tokens=num_outputs))
    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo-0613", max_tokens=num_outputs))
    documents = SimpleDirectoryReader(directory_path).load_data()
    
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)

    index.save_to_disk('index.json')

    return index

def ask_ai():
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    # index = GPTSimpleVectorIndex.load_from_disk('index.json')
    index = GPTSimpleVectorIndex.load_from_disk('index.json', service_context=service_context) 
    while True: 
        query = input("What do you want to ask? ")
        response = index.query(query)
        display(Markdown(f"Response: <b>{response.response}</b>"))
```
How does this look?
```
NameError                                 Traceback (most recent call last)
Cell In[5], line 1
----> 1 ask_ai()

Cell In[2], line 34, in ask_ai()
     33 def ask_ai():
---> 34     service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
     35     # index = GPTSimpleVectorIndex.load_from_disk('index.json')
     36     index = GPTSimpleVectorIndex.load_from_disk('index.json', service_context=service_context) 

NameError: name 'llm_predictor' is not defined
```
It just keeps wanting more and more from the function above. I gotta be doing something wrong lol
Try this instead; you need to make sure all the variables are in scope 🙂

```python
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 512
# set maximum chunk overlap
max_chunk_overlap = 60
# set chunk size limit
chunk_size_limit = 1024

# define prompt helper
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

# define LLM
# llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.5, model_name="gpt-3.5-turbo-0613", max_tokens=num_outputs))
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo-0613", max_tokens=num_outputs))
     
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper, chunk_size_limit=chunk_size_limit)

def construct_index(directory_path):
    documents = SimpleDirectoryReader(directory_path).load_data()

    index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)

    index.save_to_disk('index.json')

    return index

def ask_ai():
    index = GPTSimpleVectorIndex.load_from_disk('index.json', service_context=service_context) 
    while True: 
        query = input("What do you want to ask? ")
        response = index.query(query)
        display(Markdown(f"Response: <b>{response.response}</b>"))
```
(I also adjusted some parameters, so that they will actually work)
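Then run it the same way as before:

```python
construct_index("data/")
ask_ai()
```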
I just increase num_outputs to make it write longer now, right?
I want it to write essays
```
ValueError: Got a larger chunk overlap (60) than chunk size (-936), should be smaller.
```
I increased num_output and got this
Well, you can't increase it forever. There are some limitations (as you can see)
I can explain it
What's the max I can set
to get maximum output?
OpenAI's GPT models are decoder models. What this means is that they generate one token at a time, append it to the input, and generate the next token

So, the max_input_size is 4096. If I set num_output to 512, that means I have to leave room for 512 tokens. This means the maximum prompt length that can be sent to the LLM is 4096 minus 512
So as you increase num_output, there is less room for the actual input
and if you increase it too far, you'll get errors like the one you got above
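Rough sketch of the arithmetic (illustrative only; the prompt helper also reserves a little padding for the prompt template, so the real numbers come out slightly smaller):

```python
# Illustrative token budget, not exact PromptHelper internals
max_input_size = 4096   # context window, shared by prompt + completion
num_output = 5000       # hypothetical oversized completion budget

room_for_prompt = max_input_size - num_output
print(room_for_prompt)  # -904: a negative budget, so chunking fails with a
                        # "larger chunk overlap than chunk size" ValueError
```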
ok gotcha gotcha
Does the new one have a larger limit?
The new libraries, sorry
Yea, there are 8k, 16k, and 32k versions, although I'm not sure if they've given everyone access to those, or if they are still on a waitlist

```
gpt-3.5-turbo-16k  # 16k input
gpt-3.5-turbo-16k-0613  # 16k input
gpt-4  # 8k input
gpt-4-0613  # 8k input
gpt-4-32k  # 32k input
gpt-4-32k-0613  # 32k input
```
Keep in mind the token costs as well, though 😅
I just swap the model in here, right?
```python
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo-0613", max_tokens=num_outputs))
```
Yea, right there
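One thing worth noting: if you switch to one of the bigger models, you'll also want to bump max_input_size to match the larger context window, otherwise the prompt helper still budgets against 4096. A rough sketch (assumes your API key actually has access to gpt-3.5-turbo-16k):

```python
# Sketch: swapping in the 16k model (assumes your key has access to it)
max_input_size = 16384   # match the larger context window
num_outputs = 2048       # longer essays, still leaves ~14k tokens for input

llm_predictor = LLMPredictor(
    llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo-16k-0613", max_tokens=num_outputs)
)
```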