Seems to be working now, though I do get another error message:
ModuleNotFoundError Traceback (most recent call last)
Input In [5], in <cell line: 1>()
----> 1 from llama_index.llms.openai import OpenAI
2 from llama_index.core.evaluation import (
3 FaithfulnessEvaluator,
4 RelevancyEvaluator,
5 CorrectnessEvaluator,
6 )
8 from llama_index.core import Settings
File ~\anaconda3\lib\site-packages\llama_index\llms\openai\__init__.py:1, in <module>
----> 1 from llama_index.llms.openai.base import AsyncOpenAI, OpenAI, SyncOpenAI, Tokenizer
3 __all__ = ["OpenAI", "Tokenizer", "SyncOpenAI", "AsyncOpenAI"]
File ~\anaconda3\lib\site-packages\llama_index\llms\openai\base.py:48, in <module>
46 from llama_index.core.llms.llm import LLM
47 from llama_index.core.types import BaseOutputParser, PydanticProgramMode
---> 48 from llama_index.llms.openai.utils import (
49 create_retry_decorator,
50 from_openai_message,
51 from_openai_token_logprobs,
52 from_openai_completion_logprobs,
53 is_chat_model,
54 is_function_calling_model,
55 openai_modelname_to_contextsize,
56 resolve_openai_credentials,
57 to_openai_message_dicts,
58 )
60 from openai import AsyncOpenAI, AzureOpenAI
61 from openai import OpenAI as SyncOpenAI
File ~\anaconda3\lib\site-packages\llama_index\llms\openai\utils.py:24, in <module>
22 from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall
23 from openai.types.chat.chat_completion_message import ChatCompletionMessage
---> 24 from openai.types.chat.chat_completion_token_logprob import ChatCompletionTokenLogprob
25 from openai.types.completion_choice import Logprobs
26 from openai.types.completion import Completion
ModuleNotFoundError: No module named 'openai.types.chat.chat_completion_token_logprob'
Should I just revert to an earlier release of one of these packages (`llama-index` or `openai`)?