Guides
Best Practices

Best Practices

Recommendations for integrating GlyphNet effectively.

Security

Store Keys Securely

# Good: Environment variable
import os
api_key = os.environ.get("GLYPHNET_API_KEY")
# NOTE(review): .get() returns None when the variable is unset — consider
# os.environ["GLYPHNET_API_KEY"] so a missing key fails fast at startup.
 
# Bad: Hardcoded
api_key = "gn_live_abc123..."  # Never do this!

Use Keys Server-Side Only

Never expose your API key in client-side code:

// Bad: Browser code
const response = await fetch('https://api.glyphnet.io/v1/verify', {
  headers: { 'X-API-Key': 'gn_live_...' }  // Visible to users!
});
 
// Good: Server-side proxy
// Frontend
// The browser only talks to your own backend, so the GlyphNet key never
// ships to clients.
const response = await fetch('/api/verify', {
  method: 'POST',
  body: JSON.stringify({ text })
});
 
// Backend (Next.js API route)
export async function POST(req) {
  const { text } = await req.json();
  // Forward the text to GlyphNet. The original snippet extracted `text` but
  // never sent it: the fetch had no method, no body, and no Content-Type,
  // so the upstream verify call would receive an empty request.
  const result = await fetch('https://api.glyphnet.io/v1/verify', {
    method: 'POST',
    headers: {
      'X-API-Key': process.env.GLYPHNET_API_KEY,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({ text })
  });
  return Response.json(await result.json());
}

Rotate Keys Regularly

Create new keys and revoke old ones periodically:

# Monthly key rotation script
def rotate_key(old_key_id=None):
    """Create a fresh production key, deploy it, then revoke the old one.

    Args:
        old_key_id: ID of the key being replaced. The original snippet read
            an undefined global; taking it as a parameter keeps the example
            self-contained. When None, the revocation step is skipped.
    """
    # Create new key, named after the current month for easy auditing
    new_key = client.create_key(name=f"Production-{datetime.now().strftime('%Y-%m')}")

    # Update environment
    update_secrets(new_key["api_key"])

    # Revoke old key after confirming new one works
    if old_key_id is not None:
        client.revoke_key(old_key_id)

Performance

Batch Text When Possible

# Slow: Multiple requests
for article in articles:
    result = client.verify(article.summary)  # N requests
 
# Fast: Single request
# NOTE(review): combining texts yields one merged result — the per-article
# verdicts are lost. Only batch when an aggregate answer is acceptable.
combined = "\n\n".join(a.summary for a in articles)
result = client.verify(combined)  # 1 request

Cache Verification Results

import redis
import hashlib
import json
 
# Module-level Redis connection and TTL shared by verify_with_cache below.
redis_client = redis.Redis()  # redis-py defaults; point at your own instance
CACHE_TTL = 3600  # 1 hour
 
def verify_with_cache(text: str) -> dict:
    """Verify *text*, serving repeated inputs from a Redis-backed cache.

    The cache key is a SHA-256 digest of the text, so identical inputs hit
    the cache and distinct inputs cannot collide in practice. Entries expire
    after CACHE_TTL seconds.
    """
    digest = hashlib.sha256(text.encode()).hexdigest()
    key = f"glyphnet:{digest}"

    # Serve a cached verdict when one exists
    hit = redis_client.get(key)
    if hit:
        return json.loads(hit)

    # Otherwise verify now and remember the result for CACHE_TTL seconds
    verdict = client.verify(text)
    redis_client.setex(key, CACHE_TTL, json.dumps(verdict))
    return verdict

Use Async for High Volume

import asyncio
import aiohttp
 
async def verify_batch(texts: list[str]) -> list[dict]:
    """Verify many texts concurrently over one shared HTTP session.

    Results come back in the same order as *texts*.
    """
    async with aiohttp.ClientSession() as session:
        pending = (verify_async(session, item) for item in texts)
        return await asyncio.gather(*pending)
 
async def verify_async(session, text: str) -> dict:
    """POST one text to the verify endpoint and decode the JSON reply."""
    url = "https://api.glyphnet.io/v1/verify"
    headers = {"X-API-Key": API_KEY}
    payload = {"text": text}
    async with session.post(url, headers=headers, json=payload) as resp:
        return await resp.json()

Error Handling

Implement Retries

from tenacity import retry, stop_after_attempt, wait_exponential
 
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=1, max=60)
)
def verify_with_retry(text: str) -> dict:
    """Verify *text*, retrying up to 3 times with exponential backoff.

    raise_for_status() converts HTTP errors into exceptions so tenacity
    retries them. The explicit timeout is essential: without it a hung
    connection stalls forever, and the retry policy never gets a chance
    to fire because the first attempt never finishes.
    """
    response = requests.post(
        "https://api.glyphnet.io/v1/verify",
        headers={"X-API-Key": API_KEY},
        json={"text": text},
        timeout=30,  # seconds; fail fast so the retry policy can kick in
    )
    response.raise_for_status()
    return response.json()

Handle Specific Errors

def verify_safely(text: str) -> dict:
    """Verify *text*, degrading gracefully on rate/quota errors.

    Returns the normal verification result, a {"pending": True} marker when
    rate-limited (the text is queued for a later attempt), or a stub result
    when the monthly quota is exhausted. AuthenticationError is re-raised:
    a bad key is unrecoverable here and needs human attention.
    """
    try:
        return client.verify(text)
    except RateLimitError:
        # Queue for later
        queue.add(text)
        return {"pending": True}
    except QuotaExceededError:
        # Alert and fallback
        alert_ops("Monthly quota exceeded!")
        return {"verified": None, "reason": "quota_exceeded"}
    except AuthenticationError:
        # Critical error
        alert_critical("API key invalid!")
        raise

Log Request IDs

def verify_with_logging(text: str) -> dict:
    """Verify *text* and log the request ID and summary for later debugging."""
    response = requests.post(
        "https://api.glyphnet.io/v1/verify",
        headers={"X-API-Key": API_KEY},
        json={"text": text},
        timeout=30,  # avoid hanging forever on a stalled connection
    )
    # Surface HTTP errors instead of logging a decoded error body as a result
    response.raise_for_status()

    result = response.json()

    # Log for debugging (plain string: the original f-string had no placeholders)
    logger.info("Verification complete", extra={
        "request_id": result.get("request_id"),
        "claims": result.get("summary", {}).get("total_claims"),
        "verified": result.get("summary", {}).get("verified"),
        "processing_time": result.get("processing_time_ms")
    })

    return result

Integration Patterns

Middleware Pattern

from fastapi import FastAPI, Request
 
# FastAPI application that the verification middleware below attaches to.
app = FastAPI()
 
@app.middleware("http")
async def verify_ai_content(request: Request, call_next):
    """Middleware: flag responses marked X-AI-Generated that fail verification."""
    response = await call_next(request)
 
    # Only verify AI-generated responses
    if response.headers.get("X-AI-Generated") == "true":
        # NOTE(review): Starlette responses expose .body as an attribute, not
        # an awaitable method, and streaming bodies cannot be re-read after
        # consumption — confirm this works with the response classes the app
        # actually returns.
        content = await response.body()
        # NOTE(review): client.verify is called synchronously elsewhere in
        # this guide; awaiting it here assumes an async client — verify which
        # SDK variant is in use.
        verification = await client.verify(content.decode())
 
        if verification["flagged"]:
            response.headers["X-Verification-Warning"] = "true"
 
    return response

Queue-Based Processing

from celery import Celery
 
# Celery application hosting the background verification task.
celery = Celery('tasks')
 
@celery.task
def verify_content(content_id: str):
    """Background task: verify one stored content item and persist the outcome."""
    record = db.get_content(content_id)
    outcome = client.verify(record.text)

    update = {
        "verified": not outcome["flagged"],
        "confidence": outcome["summary"]["avg_confidence"],
        "verification_result": outcome,
    }
    db.update_content(content_id, update)

Streaming Verification

For long content, verify in chunks:

def verify_streaming(long_text: str, chunk_size: int = 5000):
    """Verify long content chunk-by-chunk and aggregate the per-chunk summaries.

    Args:
        long_text: The full text to verify; split into fixed-size slices.
        chunk_size: Characters per verification request; must be positive.

    Returns:
        A dict with total/verified claim counts, the mean per-chunk
        confidence, and whether any chunk was flagged.

    Raises:
        ValueError: If chunk_size is not positive (range(step=0) would raise
            a confusing error otherwise).
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be positive")

    chunks = [long_text[i:i + chunk_size] for i in range(0, len(long_text), chunk_size)]

    # Empty input produced no chunks; the original divided by zero here.
    if not chunks:
        return {"total_claims": 0, "verified": 0, "avg_confidence": 0.0, "flagged": False}

    results = [client.verify(chunk) for chunk in chunks]

    # Aggregate results
    return {
        "total_claims": sum(r["summary"]["total_claims"] for r in results),
        "verified": sum(r["summary"]["verified"] for r in results),
        "avg_confidence": sum(r["summary"]["avg_confidence"] for r in results) / len(results),
        "flagged": any(r["flagged"] for r in results)
    }

Monitoring

Track Key Metrics

# Track in your monitoring system
def track_verification(result: dict):
    """Emit verification metrics: claim counts, confidence, latency, flags."""
    summary = result["summary"]
    metrics.gauge("glyphnet.claims.total", summary["total_claims"])
    metrics.gauge("glyphnet.claims.verified", summary["verified"])
    metrics.gauge("glyphnet.confidence.avg", summary["avg_confidence"])
    metrics.histogram("glyphnet.latency", result["processing_time_ms"])

    if result["flagged"]:
        metrics.increment("glyphnet.flagged")

Set Up Alerts

  • Usage approaching limit (80%, 90%)
  • High rate of unverified claims
  • Increased latency
  • Error rate spikes