diff --git a/llama-index-cli/llama_index/cli/rag/base.py b/llama-index-cli/llama_index/cli/rag/base.py
index 03b4942a7..022ff95f0 100644
--- a/llama-index-cli/llama_index/cli/rag/base.py
+++ b/llama-index-cli/llama_index/cli/rag/base.py
@@ -5,6 +5,11 @@ from argparse import ArgumentParser
from glob import iglob
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union, cast
+from llama_index.llms.openai_like import OpenAILike
+# from llama_index.core.base.embeddings.openai import OpenAIEmbedding
+from llama_index.embeddings.openai import OpenAIEmbedding
+
+
from llama_index.core import (
SimpleDirectoryReader,
@@ -33,7 +38,8 @@ def _try_load_openai_llm():
try:
from llama_index.llms.openai import OpenAI # pants: no-infer-dep
- return OpenAI(model="gpt-3.5-turbo", streaming=True)
+ return OpenAILike(api_base="http://localhost:8080/v1", model="gpt-4", api_key="nothing", is_local=True, context_window=8192)
+ # return OpenAI(model="gpt-3.5-turbo", streaming=True)
except ImportError:
raise ImportError(
"`llama-index-llms-openai` package not found, "
@@ -120,7 +126,8 @@ class RagCLI(BaseModel):
llm = cast(LLM, values["llm"])
# get embed_model from transformations if possible
+ embed_model = OpenAIEmbedding(model_name="gpt-3.5-turbo", api_base="http://localhost:8080/v1", api_key="nothing", is_local=True, context_window=8192)
if ingestion_pipeline.transformations is not None:
for transformation in ingestion_pipeline.transformations:
if isinstance(transformation, BaseEmbedding):