Everything you need to integrate and use RAGView.
Get up and running with RAGView in under 5 minutes.
# Install the Python SDK via pip
pip install ragview
# Or install the JavaScript SDK via npm
npm install @ragview/sdk
import ragview
# Initialize the RAGView client once with your API key
ragview.init(api_key="your_api_key_here")
@ragview.trace()
def retrieve_and_generate(query):
    """Retrieve relevant context and generate an answer, traced by RAGView."""
    # Top-5 most relevant chunks for this query.
    relevant_chunks = vector_db.search(query, k=5)
    # Answer grounded in the retrieved context.
    return llm.generate(query, context=relevant_chunks)
Visit your RAGView dashboard to see real-time diagnostics, pollution tracking, and performance metrics.
Initialize the RAGView client with your credentials.
# Configure the client; project_name and environment determine where
# traces appear in the dashboard (e.g. keeping production separate).
ragview.init(
    api_key="your_api_key",
    project_name="my-rag-project",
    environment="production"
)
Decorator to automatically capture retrieval and generation metrics.
# capture_input/capture_output record the decorated function's arguments
# and return value; track_chunks records per-chunk retrieval metrics.
@ragview.trace(
    capture_input=True,
    capture_output=True,
    track_chunks=True
)
def my_rag_function(query):
    pass
Manually log retrieval results for analysis.
# Log one retrieval event: the query, each returned chunk with its
# relevance score, and free-form metadata (here, the retrieval method).
ragview.log_retrieval(
    query="user question",
    chunks=[
        {"text": "chunk content", "score": 0.95},
        {"text": "another chunk", "score": 0.87}
    ],
    metadata={"retrieval_method": "hybrid"}
)
Analyze context pollution for a given prompt.
# Score how much of the supplied context is irrelevant to the query.
result = ragview.analyze_pollution(
    query="user question",
    context="full context string"
)
# The result exposes an overall score and the offending segments.
print(result.pollution_score)
print(result.polluted_segments)
// Load the SDK (CommonJS) and create a client bound to one project.
const RAGView = require('@ragview/sdk');
const ragview = new RAGView({
  apiKey: 'your_api_key',
  projectName: 'my-rag-project'
});
/**
 * Run one retrieval-augmented generation pass, recording the retrieval
 * and generation stages in a RAGView trace.
 *
 * The original example never ended the trace if `vectorDB.search` or
 * `llm.generate` threw; the try/finally guarantees `trace.end()` runs
 * on every path so no trace is left dangling.
 *
 * @param {string} query - The user's question.
 * @returns {Promise<*>} The response produced by `llm.generate`.
 */
async function retrieveAndGenerate(query) {
  const trace = ragview.startTrace();
  try {
    const chunks = await vectorDB.search(query, { k: 5 });
    trace.logRetrieval(query, chunks);
    const response = await llm.generate(query, chunks);
    trace.logGeneration(response);
    return response;
  } finally {
    // Close the trace even when retrieval or generation fails.
    await trace.end();
  }
}
All API requests require an API key in the Authorization header:
Authorization: Bearer your_api_key_here
Submit a trace for analysis.
{
"query": "user question",
"chunks": [
{
"text": "chunk content",
"score": 0.95,
"metadata": {}
}
],
"response": "generated answer",
"timestamp": "2026-03-22T10:30:00Z"
}
Retrieve aggregated metrics for your project.
GET /api/v1/metrics?start_date=2026-03-01&end_date=2026-03-22
Analyze context pollution for a given prompt via the REST API, passing the query and context in the request body.
{
"query": "user question",
"context": "full context string"
}