VectorShift SDK — RAG pipeline example

Documentation Index

Fetch the complete documentation index at: https://docs.vectorshift.ai/llms.txt

Use this file to discover all available pages before exploring further.

import vectorshift
from vectorshift.pipeline import (
    Pipeline,
    InputNode,
    KnowledgeBaseNode,
    OutputNode,
    LlmNode,
)
from vectorshift import KnowledgeBase

# Set API key: every subsequent SDK call authenticates with this
# module-level value.
# NOTE(review): placeholder for an example — in real code load the key from
# an environment variable or secrets manager rather than hardcoding it.
vectorshift.api_key = "your api key here"

# Pipeline entry point: receives the user's question at run time.
input_node = InputNode(node_name="Query")

# Look up an existing knowledge base by its display name.
knowledge_base = KnowledgeBase.fetch(name="your knowledge base name here")

# Retrieval step: search the knowledge base with the user's query and
# pre-format the matching documents so an LLM can consume them directly.
knowledge_base_node = KnowledgeBaseNode(
    knowledge_base=knowledge_base,
    query=input_node.text,
    format_context_for_llm=True,
)

# Generation step: the LLM answers using both the raw query and the
# retrieved, pre-formatted context documents.
llm_node = LlmNode(
    provider="openai",
    model="gpt-4o-mini",
    temperature=0.7,
    system="You are a helpful assistant that answers questions based on the provided context documents.",
    prompt=f"Query: {input_node.text}\n\nContext: {knowledge_base_node.formatted_text}",
)

# Pipeline exit point: surfaces the LLM's answer to the caller.
output_node = OutputNode(
    node_name="Response",
    value=llm_node.response,
)

# Fetch-or-create the RAG pipeline: reuse an existing pipeline with this
# name, otherwise build a new one from the nodes defined above.
PIPELINE_NAME = "rag-pipeline"
try:
    # Keep the try body minimal: only the fetch itself should route us to
    # the creation path. (Previously the success print was inside the try,
    # so an error while reporting would have triggered pipeline creation.)
    rag_pipeline = Pipeline.fetch(name=PIPELINE_NAME)
except Exception as e:
    # NOTE(review): Exception is broad — a network/auth failure is treated
    # the same as "pipeline not found". Narrow this if the SDK exposes a
    # specific not-found error type.
    print(f"Error fetching pipeline: {e}")
    rag_pipeline = Pipeline.new(
        name=PIPELINE_NAME,
        nodes=[input_node, knowledge_base_node, llm_node, output_node],
    )
    print(f"Pipeline created: id={rag_pipeline.id}, branch_id={rag_pipeline.branch_id}")
else:
    print(f"Pipeline fetched: id={rag_pipeline.id}, branch_id={rag_pipeline.branch_id}")
Source: examples/pipelines/rag_pipeline.py in the SDK repo.