```python
import tiktoken
from langchain.chat_models import ChatOpenAI
# imports for the legacy (pre-0.10) llama_index API used here
from llama_index import LLMPredictor
from llama_index.callbacks import CallbackManager, TokenCountingHandler

# Count tokens with the same encoding the target model uses
token_counter = TokenCountingHandler(
    tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo-16k").encode
)
callback_manager = CallbackManager([token_counter])

# Deterministic responses from the 16k-context chat model
llm_predictor = LLMPredictor(
    llm=ChatOpenAI(model_name='gpt-3.5-turbo-16k', temperature=0)
)
```
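The snippet above only constructs the counter and predictor; to actually record usage, the callback manager has to be attached to whatever issues the LLM calls. A minimal sketch of one way to do that with the same legacy API generation, assuming a `ServiceContext`-based setup (not shown in the original snippet):

```python
from llama_index import ServiceContext

# Route every LLM call through the callback manager, so the token counter sees it
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    callback_manager=callback_manager,
)

# ... build an index / query engine with this service_context and run queries ...

# Inspect the accumulated counts exposed by TokenCountingHandler
print("prompt tokens:", token_counter.prompt_llm_token_count)
print("completion tokens:", token_counter.completion_llm_token_count)
print("total LLM tokens:", token_counter.total_llm_token_count)

token_counter.reset_counts()  # start fresh before the next measurement
```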