SDK Reference
Complete API reference for the runesec Python SDK.
Shield
The main entry point. Orchestrates scanning, policy evaluation, and event emission.
from rune import Shield
shield = Shield(
api_key="rune_live_xxx", # API key (or set RUNE_API_KEY env var)
config=ShieldConfig(...), # Optional full config object
l1_enabled=True, # L1 regex scanning (default: True)
l2_enabled=False, # L2 vector scanning (requires extras)
l3_enabled=False, # L3 LLM judge (requires API key)
on_block="raise", # "raise" | "log" | "alert"
scan_timeout_ms=5000, # Max scan time before proceeding
default_agent_id="my-agent", # Default agent ID for events
default_agent_tags=["prod"], # Default tags
local_policies_path="./policies", # Load YAML policies from disk
policy_sync_interval_seconds=60, # Cloud policy sync interval
)
shield.scan_input()
Scan text for threats. Runs L1 (regex) and optionally L2 (vector) synchronously with a target of under 10ms.
result = shield.scan_input(
content="user message here",
context={"agent_id": "my-agent", "source": "chat"}, # optional
)
# ScanResult fields:
result.blocked # bool — whether the input should be blocked
result.risk_score # float 0-1 — overall risk score
result.threat_type # str | None — "prompt_injection", "jailbreak", etc.
result.explanation # str | None — human-readable explanation
result.scan_latency_ms # float — time spent scanning
result.pii_detected # bool — whether PII was found
result.secrets_detected # bool — whether secrets/API keys were found
result.layers_executed # list[str] — which scanning layers ran (e.g. ["l1", "l2"])
result.l1_result # str — "pass", "block", or "flag"
result.l2_result # str | None — L2 result if executed
result.l3_result # str | None — L3 result if executed
result.sanitized_content # str | None — content with threats removed
result.threats_detected # list[dict] — detailed threat information
shield.scan_output()
Scan LLM output for data leaks. Same as scan_input but adds secrets/PII detection (SSN, credit cards, API keys, etc).
result = shield.scan_output(
content=llm_response_text,
context={"agent_id": "my-agent", "direction": "inbound"},
)
if result.secrets_detected:
print("LLM leaked a secret!")
shield.scan_deep()
Full scanning pipeline including L3 LLM judge. Async only. Use for high-value inputs where you need maximum accuracy.
result = await shield.scan_deep(
content="potentially adversarial input",
context={"agent_id": "my-agent"},
)
shield.validate_action()
Check an action against security policies before execution.
result = shield.validate_action(
agent_id="my-agent",
action="salesforce.update",
params={"record_id": "001xx", "field": "Status"},
policies=["no-salesforce-deletes"], # optional: specific policies
agent_tags=["prod", "sales"], # optional: for policy matching
)
# PolicyResult fields:
result.allowed # bool — whether the action is permitted
result.blocked # bool — opposite of allowed (property)
result.violations # list[dict[str, Any]] — policy rule violations with details
result.evaluated_policies # list[str] — which policies were evaluated
result.action_taken # str — "allow", "block", "alert", or "log"
result.policy_latency_ms # float — time spent evaluating policies
@shield.protect()
Decorator that wraps a function with full scan + policy + output scanning pipeline. Works with both sync and async functions.
@shield.protect(
agent_id="my-agent", # override default agent ID
policies=["allowed-tools"], # specific policies to check
on_block="raise", # "raise" | "log" | "alert"
agent_tags=["prod"], # for policy matching
)
async def execute_tool(tool_name: str, params: dict) -> dict:
return await call_api(tool_name, **params)
The decorator runs this pipeline:
- Scan input arguments
- Evaluate security policies
- Check anomaly detection
- Execute the wrapped function
- Scan the output
- Emit security event (non-blocking)
ShieldConfig
from rune.config import ShieldConfig
config = ShieldConfig(
# Authentication
api_key="rune_live_xxx", # or RUNE_API_KEY env var
# Cloud endpoints
events_endpoint="https://events.runesec.dev", # or RUNE_EVENTS_ENDPOINT
api_endpoint="https://api.runesec.dev", # or RUNE_API_ENDPOINT
# Scanner configuration
l1_enabled=True, # Regex pattern matching
l2_enabled=False, # Vector similarity (needs extras)
l3_enabled=False, # LLM judge (needs API key)
l3_blocking=False, # If True, L3 blocks synchronously
# L2 thresholds
l2_block_threshold=0.85, # Similarity score to block
l2_flag_threshold=0.70, # Similarity score to flag
# Behavioral
on_block="raise", # "raise" | "log" | "alert"
scan_timeout_ms=5000, # Max scanning time
connection_timeout_seconds=10.0, # HTTP timeout for cloud calls
# Policy settings
policy_sync_interval_seconds=60, # How often to sync policies
local_policies_path="./policies", # Load YAML from disk
# Event emission
event_batch_size=50, # Events per batch
event_flush_interval_seconds=5.0, # Flush interval
events_enabled=True, # Set False to disable events
# Rate limiting
default_rate_limit_per_minute=100,
# Agent identification
default_agent_id="my-agent", # or RUNE_AGENT_ID env var
default_agent_tags=["production"], # or RUNE_AGENT_TAGS env var
# Debug
debug=False, # or RUNE_DEBUG env var
)
Utility Methods
# Redact all secrets/PII in text
clean = shield.redact("My SSN is 123-45-6789")
# "My SSN is [REDACTED]"
# Flush pending events to cloud
await shield.flush()
# Graceful shutdown
await shield.close() # async
shield.shutdown() # sync
# Stats
print(shield.stats)
# {"events_sent_total": 42, "events_failed_total": 0, ...}
ShieldBlockedError
Raised when on_block="raise" and a scan or policy check blocks an action.
from rune.shield import ShieldBlockedError
try:
result = await execute_tool("delete_all", {})
except ShieldBlockedError as e:
print(e) # Human-readable message
print(e.scan_result) # ScanResult or None
print(e.policy_result) # PolicyResult or None