# Python SDK

Official Python client for the Creed Space Safety API.
## Installation

```bash
pip install creed-sdk
```

## Quick Start
```python
from creed_sdk import CreedSpace

# Initialize the client
client = CreedSpace(api_key="cs_live_...")

# Evaluate content
result = client.safety.evaluate("Hello, how can I help?")
print(f"Risk score: {result.risk_score}")
print(f"Decision: {result.decision}")
print(f"Blocked: {result.is_blocked}")
```

## Configuration
```python
from creed_sdk import CreedSpace

client = CreedSpace(
    api_key="cs_live_...",
    base_url="https://api.creed.space",  # Optional
    timeout=30.0  # Request timeout in seconds
)
```

### Environment Variables
You can also use environment variables:

```bash
export CREED_API_KEY="cs_live_..."
```

```python
import os
from creed_sdk import CreedSpace

client = CreedSpace(api_key=os.environ["CREED_API_KEY"])
```

## Safety Evaluation
### Basic Evaluation

```python
message = "User message here"
result = client.safety.evaluate(message)

if result.decision == "permit":
    # Safe to proceed
    process_message(message)
elif result.decision == "forbid":
    # Block the content
    print("Content blocked")
```

### With Context
```python
result = client.safety.evaluate(
    "I want to kill the boss",
    context={
        "topic": "video_game",
        "platform": "gaming"
    },
    session_id="user-123-session-456"
)
```

### Response Object
```python
result = client.safety.evaluate(text)

result.risk_score        # float: 0.0 - 1.0
result.decision          # str: "permit", "forbid", "divert", "depends"
result.is_blocked        # bool: Should content be blocked?
result.matched_rules     # list[str]: Rules that matched
result.decision_time_ms  # float: Processing time
```

## PDP Adjudication
For complex policy decisions:

```python
decision = client.pdp.adjudicate(
    content="Content to evaluate",
    context={"key": "value"}
)
print(f"Verdict: {decision.verdict}")
print(f"Findings: {decision.findings}")
```

## Constitutions
### List Available Constitutions

```python
constitutions = client.constitutions.list()
for c in constitutions:
    print(f"{c.id}: {c.name}")
```

### Get Merged Constitution
```python
merged = client.constitutions.get_merged(persona="ambassador")
print(merged.principles)
```

## Error Handling
```python
from creed_sdk import CreedSpace
from creed_sdk.exceptions import (
    CreedSpaceError,
    AuthenticationError,
    RateLimitError
)

client = CreedSpace(api_key="...")

try:
    result = client.safety.evaluate(text)
except AuthenticationError:
    print("Invalid API key")
except RateLimitError as e:
    print(f"Rate limited. Retry after {e.retry_after}s")
except CreedSpaceError as e:
    print(f"API error: {e}")
```

## Async Support
For async applications:

```python
from creed_sdk import AsyncCreedSpace

async def main():
    client = AsyncCreedSpace(api_key="...")
    result = await client.safety.evaluate("Hello")
    print(result.decision)
    await client.close()
```

Or use as a context manager:
```python
async with AsyncCreedSpace(api_key="...") as client:
    result = await client.safety.evaluate("Hello")
```

## Type Hints
The SDK is fully typed for IDE support:

```python
from creed_sdk import CreedSpace
from creed_sdk.types import EvaluateResponse

def process(result: EvaluateResponse) -> None:
    if result.risk_score > 0.5:
        print("High risk content")
```

## Examples
### Chat Moderation

```python
def moderate_message(user_id: str, message: str) -> str | None:
    result = client.safety.evaluate(
        message,
        context={"user_id": user_id}
    )
    if result.decision == "forbid":
        return "Your message was blocked for policy violations."
    if result.decision == "divert":
        queue_for_review(user_id, message)
        return "Your message is being reviewed."
    return None  # Message is safe

# Usage
warning = moderate_message("user123", user_input)
if warning:
    print(warning)
else:
    send_message(user_input)
```

### Batch Processing
```python
import asyncio

from creed_sdk import AsyncCreedSpace

messages = ["Message 1", "Message 2", "Message 3"]

# Process in parallel with asyncio
async def evaluate_batch(messages: list[str]):
    async with AsyncCreedSpace(api_key="...") as client:
        tasks = [client.safety.evaluate(m) for m in messages]
        return await asyncio.gather(*tasks)

results = asyncio.run(evaluate_batch(messages))
```