from llama_index import ServiceContext
from llama_index.llms import OpenAI

# Build a service context that routes all LLM calls to the fine-tuned model.
# `callback_manager` and `node_parser` are assumed to be defined earlier.
service_context = ServiceContext.from_defaults(
    callback_manager=callback_manager,
    llm=OpenAI(
        model="davinci:ft-personal:random-2023-02-08-18-09-09",
        temperature=0,
        max_tokens=1000,
    ),
    chunk_size=1024,
    node_parser=node_parser,
)
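
# A minimal usage sketch (an assumption, not part of the original snippet):
# the service context can be passed when building an index so that queries
# are answered by the fine-tuned model. `documents` is a hypothetical
# variable assumed to have been loaded earlier, e.g. via SimpleDirectoryReader.
from llama_index import VectorStoreIndex

index = VectorStoreIndex.from_documents(documents, service_context=service_context)
query_engine = index.as_query_engine()
response = query_engine.query("Your question here")
print(response)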