Use Cases
Practical implementation patterns for common IdentityCall scenarios. Each use case includes context, an implementation approach, and code examples.
Customer Service Quality Assurance
Context
Contact centers need to evaluate agent performance across thousands of calls. Manual review is time-consuming and inconsistent. IdentityCall automates quality scoring through goal evaluation.
Implementation Pattern
- Define Goals - Create goals in your project that match your quality scorecard (a setup sketch follows this list)
- Upload Recordings - Send completed call recordings via API
- Retrieve Results - Poll for completion, then fetch goal evaluations
- Aggregate Scores - Calculate agent and team performance metrics
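Step 1 is usually a one-time setup per project. The sketch below assumes a `POST /goals` endpoint and payload shape purely for illustration; check the API reference for the actual goal-management route. It reuses the `requests` setup from the example that follows.

# Hypothetical goal-definition calls: the /goals route and its fields
# are assumptions, not confirmed API surface.
scorecard = [
    {"name": "greeting_given", "description": "Agent greets the caller and states their name"},
    {"name": "issue_resolved", "description": "Caller's issue is resolved or escalated"},
]
for goal in scorecard:
    requests.post(f"{BASE_URL}/goals", headers=headers, json=goal).raise_for_status()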
Example: Daily QA Report Generator
import requests
from datetime import datetime, timedelta, timezone
API_KEY = "idc_your_api_key"
BASE_URL = "https://api.identitycall.ai/api/v1/public"
headers = {"Authorization": f"Bearer {API_KEY}"}
def get_completed_recordings(days_back=1):
    """Fetch all completed recordings from the past N days."""
    cutoff = datetime.now(timezone.utc) - timedelta(days=days_back)
    recordings = []
    page = 1
    while True:
        response = requests.get(
            f"{BASE_URL}/recordings",
            headers=headers,
            params={"page": page, "per_page": 100, "status": "completed"}
        )
        data = response.json()
        recordings.extend(data["data"])
        if page >= data["meta"]["total_pages"]:
            break
        page += 1
    # Date filtering is done client-side; a "created_at" field holding an
    # ISO-8601 timestamp with timezone is assumed here -- adjust to the
    # actual response schema.
    return [
        r for r in recordings
        if datetime.fromisoformat(r["created_at"].replace("Z", "+00:00")) >= cutoff
    ]
def generate_qa_report(recordings):
"""Generate quality scores by agent."""
agent_scores = {}
for recording in recordings:
results = requests.get(
f"{BASE_URL}/recordings/{recording['id']}/results",
headers=headers
).json()["data"]
summary = requests.get(
f"{BASE_URL}/recordings/{recording['id']}/summary",
headers=headers
).json()["data"]
# Group by first speaker (usually agent)
agent = summary["speakers"][0] if summary["speakers"] else "Unknown"
if agent not in agent_scores:
agent_scores[agent] = {"calls": 0, "total_score": 0, "goals": {}}
agent_scores[agent]["calls"] += 1
for goal in results["goals"]:
goal_name = goal["goal_name"]
if goal_name not in agent_scores[agent]["goals"]:
agent_scores[agent]["goals"][goal_name] = {"met": 0, "total": 0}
agent_scores[agent]["goals"][goal_name]["total"] += 1
if goal["met"]:
agent_scores[agent]["goals"][goal_name]["met"] += 1
return agent_scores
# Generate report
recordings = get_completed_recordings()
report = generate_qa_report(recordings)
for agent, data in report.items():
print(f"\n{agent}: {data['calls']} calls")
for goal, scores in data["goals"].items():
pct = (scores["met"] / scores["total"]) * 100 if scores["total"] > 0 else 0
print(f" {goal}: {pct:.0f}%")Key Metrics to Track
- Goal achievement rate per agent (see the sketch below)
- Goal achievement rate per goal type
- Trend analysis over time
- Compliance violations (non-compliant pauses)
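The per-agent rate falls directly out of the `agent_scores` structure built above. A minimal sketch that also flags coaching candidates (the 80% threshold is an arbitrary example):

def flag_low_performers(report, threshold=80.0):
    """Compute each agent's overall goal achievement rate; flag low scores."""
    flagged = []
    for agent, data in report.items():
        met = sum(g["met"] for g in data["goals"].values())
        total = sum(g["total"] for g in data["goals"].values())
        rate = (met / total) * 100 if total else 0
        if rate < threshold:
            flagged.append((agent, rate))
    return sorted(flagged, key=lambda x: x[1])

for agent, rate in flag_low_performers(report):
    print(f"Coaching candidate: {agent} ({rate:.0f}%)")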
Sales Call Analysis
Context
Sales teams want to understand what makes calls successful. By analyzing emotion patterns and goal completion rates, managers can identify best practices and coaching opportunities.
Implementation Pattern
- Tag Recordings - Include the call outcome (won/lost) in the recording name or metadata (sketched after this list)
- Analyze Patterns - Compare goal achievement between won and lost deals
- Track Emotions - Monitor customer emotion progression during successful calls
- Identify Coaching Needs - Find agents with low goal achievement on specific objectives
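Tagging can happen at upload time. A sketch, assuming the upload endpoint accepts a `name` form field alongside the documented `file` and `language` fields; verify the field name against the API reference. The analysis below reads the outcome back out of `recording["name"]`.

def upload_tagged_call(file_path, outcome):
    """Upload a call with the deal outcome encoded in the recording name."""
    with open(file_path, "rb") as f:
        response = requests.post(
            f"{BASE_URL}/recordings",
            headers=headers,
            files={"file": f},
            # "name" is an assumed form field, not confirmed API surface
            data={"language": "en", "name": f"{outcome} - {file_path}"},
        )
    return response.json()["data"]

upload_tagged_call("acme_renewal.mp3", "won")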
Example: Win/Loss Pattern Analysis
def analyze_sales_patterns(recordings):
    """Compare patterns between won and lost deals."""
    patterns = {"won": [], "lost": []}
    for recording in recordings:
        # Determine outcome from recording name
        outcome = "won" if "won" in recording["name"].lower() else "lost"
        results = requests.get(
            f"{BASE_URL}/recordings/{recording['id']}/results",
            headers=headers
        ).json()["data"]
        transcription = requests.get(
            f"{BASE_URL}/recordings/{recording['id']}/transcription",
            headers=headers
        ).json()["data"]
        # Goal achievement: share of this call's goals that were met
        goals = results["goals"]
        goal_achievement = (
            sum(1 for g in goals if g["met"]) / len(goals) * 100 if goals else 0
        )
        # Calculate average customer emotion
        customer_emotions = []
        for dialogue in transcription["dialogues"]:
            if "customer" in dialogue["speaker"].lower():
                # Net positive emotion score for this segment
                positive = dialogue["emotion"]["happy"] + dialogue["emotion"]["calm"]
                negative = dialogue["emotion"]["angry"] + dialogue["emotion"]["sad"]
                customer_emotions.append(positive - negative)
        avg_emotion = sum(customer_emotions) / len(customer_emotions) if customer_emotions else 0
        patterns[outcome].append({
            "goal_achievement": goal_achievement,
            "customer_emotion": avg_emotion,
            "duration_ms": recording["duration_ms"]
        })
    # Compare won vs lost averages
    for outcome in ["won", "lost"]:
        if patterns[outcome]:
            avg_goal = sum(p["goal_achievement"] for p in patterns[outcome]) / len(patterns[outcome])
            avg_emotion = sum(p["customer_emotion"] for p in patterns[outcome]) / len(patterns[outcome])
            avg_duration = sum(p["duration_ms"] for p in patterns[outcome]) / len(patterns[outcome]) / 60000
            print(f"\n{outcome.upper()} Deals ({len(patterns[outcome])} calls):")
            print(f" Avg Goal Achievement: {avg_goal:.1f}%")
            print(f" Avg Customer Sentiment: {avg_emotion:.2f}")
            print(f" Avg Duration: {avg_duration:.1f} minutes")
    # Returned so the raw data can be reused for further analysis
    return patterns

Sales Insights to Extract
- Correlation between goal achievement and deal outcome (see the sketch below)
- Optimal call duration for successful deals
- Customer emotion trajectory patterns
- Most impactful goals for conversion
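For the first insight, a point-biserial correlation between outcome (won = 1, lost = 0) and goal achievement can be computed from the `patterns` dict the function above returns; a plain-Python sketch, reusing the recordings list fetched earlier:

import math

def outcome_goal_correlation(patterns):
    """Point-biserial correlation between deal outcome and goal achievement."""
    pairs = ([(1, p["goal_achievement"]) for p in patterns["won"]] +
             [(0, p["goal_achievement"]) for p in patterns["lost"]])
    n = len(pairs)
    if n < 2:
        return 0.0
    mean_x = sum(x for x, _ in pairs) / n
    mean_y = sum(y for _, y in pairs) / n
    cov = sum((x - mean_x) * (y - mean_y) for x, y in pairs)
    var_x = sum((x - mean_x) ** 2 for x, _ in pairs)
    var_y = sum((y - mean_y) ** 2 for _, y in pairs)
    return cov / math.sqrt(var_x * var_y) if var_x and var_y else 0.0

correlation = outcome_goal_correlation(analyze_sales_patterns(recordings))
print(f"Outcome/goal-achievement correlation: {correlation:.2f}")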
Compliance Monitoring
Context
Regulated industries must verify that calls include required disclosures and appropriate wait times. Pause analysis detects compliance-relevant silences.
Implementation Pattern
- Configure Pause Thresholds - Set minimum/maximum pause durations for compliance
- Monitor Results - Check pause compliance status in analysis results
- Alert on Violations - Trigger notifications for non-compliant calls (a dispatch sketch closes this use case)
- Generate Audit Reports - Document compliance rates for regulators
Example: Compliance Alert System
def check_compliance(recording_id):
"""Check a recording for compliance violations."""
results = requests.get(
f"{BASE_URL}/recordings/{recording_id}/results",
headers=headers
).json()["data"]
violations = []
# Check pause compliance
for pause in results["pauses"]:
if not pause["compliant"]:
violations.append({
"type": "pause_violation",
"duration_ms": pause["duration_ms"],
"timestamp_ms": pause["start_ms"]
})
# Check goal compliance (certain goals may be required)
required_goals = ["disclosure_given", "consent_obtained"]
for goal in results["goals"]:
if goal["goal_name"].lower() in required_goals and not goal["met"]:
violations.append({
"type": "goal_violation",
"goal": goal["goal_name"],
"score": goal["score"]
})
return violations
def generate_compliance_report(recordings):
"""Generate compliance report for audit."""
total_calls = len(recordings)
compliant_calls = 0
violation_summary = {}
for recording in recordings:
violations = check_compliance(recording["id"])
if not violations:
compliant_calls += 1
else:
for v in violations:
key = v["type"]
violation_summary[key] = violation_summary.get(key, 0) + 1
compliance_rate = (compliant_calls / total_calls) * 100 if total_calls > 0 else 0
print(f"Compliance Report")
print(f"=" * 40)
print(f"Total Calls: {total_calls}")
print(f"Compliant Calls: {compliant_calls}")
print(f"Compliance Rate: {compliance_rate:.1f}%")
print(f"\nViolations by Type:")
for violation_type, count in violation_summary.items():
print(f" {violation_type}: {count}")Compliance Tracking
- Pause duration monitoring (required wait times)
- Goal achievement for required disclosures
- Trend analysis for compliance rates
- Agent-specific compliance scoring
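To close the loop on step 3, violations can be pushed to whatever alerting channel the team already uses. A minimal sketch posting to a generic webhook; `ALERT_WEBHOOK_URL` is a placeholder for your own endpoint, not part of the IdentityCall API:

ALERT_WEBHOOK_URL = "https://example.com/compliance-alerts"  # placeholder

def alert_on_violations(recording_id):
    """Check one recording and forward any violations to an alert webhook."""
    violations = check_compliance(recording_id)
    if violations:
        requests.post(ALERT_WEBHOOK_URL, json={
            "recording_id": recording_id,
            "violation_count": len(violations),
            "violations": violations,
        })
    return violations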
Voice Authentication
Context
Voice biometrics enable caller verification without knowledge-based authentication. Voice profiles are enrolled once and matched against future calls.
Implementation Pattern
- Enroll Voice Profiles - Create profiles from known speaker recordings (an enrollment sketch follows this list)
- Process Incoming Calls - Upload call recordings for analysis
- Match Speakers - Check voice_profile_id in transcription dialogues
- Verify Identity - Confirm speaker matches expected caller
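Enrollment itself (step 1) happens once per known speaker. The route and payload below are assumptions for illustration only; consult the API reference for the actual voice-profile endpoint:

# Hypothetical enrollment call: /voice_profiles and its fields are
# assumptions, not confirmed API surface.
with open("known_speaker.wav", "rb") as f:
    response = requests.post(
        f"{BASE_URL}/voice_profiles",
        headers=headers,
        files={"file": f},
        data={"name": "Jane Doe"},
    )
profile_id = response.json()["data"]["id"]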
Example: Caller Verification Flow
def verify_caller(recording_id, expected_profile_id):
"""Verify if expected caller participated in the call."""
transcription = requests.get(
f"{BASE_URL}/recordings/{recording_id}/transcription",
headers=headers
).json()["data"]
# Check all dialogues for voice profile match
matched_dialogues = []
for dialogue in transcription["dialogues"]:
if dialogue["voice_profile_id"] == expected_profile_id:
matched_dialogues.append(dialogue)
if matched_dialogues:
total_duration = sum(d["end_ms"] - d["start_ms"] for d in matched_dialogues)
return {
"verified": True,
"segments_matched": len(matched_dialogues),
"speech_duration_ms": total_duration,
"profile_name": matched_dialogues[0]["voice_profile_name"]
}
return {
"verified": False,
"segments_matched": 0,
"speech_duration_ms": 0,
"profile_name": None
}
# Example usage
result = verify_caller(recording_id=123, expected_profile_id=456)
if result["verified"]:
print(f"Caller verified as {result['profile_name']}")
print(f"Spoke in {result['segments_matched']} segments")
else:
print("Caller identity could not be verified")Voice Biometrics Use Cases
- Fraud prevention (verify caller identity)
- VIP caller identification
- Multi-caller tracking in conference calls (see the sketch below)
- Agent voice monitoring
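Multi-caller tracking needs only the transcription payload. A sketch that tallies speech time per matched profile, grouping unmatched segments under their diarized speaker label:

def tally_speakers(recording_id):
    """Aggregate speech duration per voice profile (or speaker label)."""
    transcription = requests.get(
        f"{BASE_URL}/recordings/{recording_id}/transcription",
        headers=headers
    ).json()["data"]
    totals = {}
    for d in transcription["dialogues"]:
        # Fall back to the diarized speaker label when no profile matched
        key = d.get("voice_profile_name") or d["speaker"]
        totals[key] = totals.get(key, 0) + (d["end_ms"] - d["start_ms"])
    return totals

for name, ms in tally_speakers(123).items():
    print(f"{name}: {ms / 1000:.0f}s of speech")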
Emotion Trend Analysis
Context
Understanding emotional dynamics helps improve customer experience. Tracking emotion changes throughout a call reveals satisfaction patterns and problem moments.
Implementation Pattern
- Fetch Transcription - Get full dialogue with emotion scores
- Segment Timeline - Divide call into phases (opening, middle, closing)
- Calculate Trends - Track emotion changes over time
- Identify Triggers - Find moments where emotions shifted
Example: Emotion Timeline Analyzer
def analyze_emotion_timeline(recording_id):
"""Analyze emotion progression throughout a call."""
transcription = requests.get(
f"{BASE_URL}/recordings/{recording_id}/transcription",
headers=headers
).json()["data"]
timeline = []
for dialogue in transcription["dialogues"]:
emotions = dialogue["emotion"]
# Calculate sentiment score (-1 to 1)
positive = emotions["happy"] + emotions["calm"] + emotions["neutral"] * 0.5
negative = emotions["angry"] + emotions["sad"] + emotions["fearful"]
sentiment = positive - negative
# Find dominant emotion
dominant = max(emotions.items(), key=lambda x: x[1])
timeline.append({
"timestamp_ms": dialogue["start_ms"],
"speaker": dialogue["speaker"],
"sentiment": sentiment,
"dominant_emotion": dominant[0],
"text_preview": dialogue["text"][:50] + "..." if len(dialogue["text"]) > 50 else dialogue["text"]
})
# Identify emotional shifts
shifts = []
for i in range(1, len(timeline)):
prev = timeline[i-1]
curr = timeline[i]
shift = curr["sentiment"] - prev["sentiment"]
if abs(shift) > 0.3: # Significant shift
shifts.append({
"timestamp_ms": curr["timestamp_ms"],
"shift": shift,
"direction": "positive" if shift > 0 else "negative",
"from_emotion": prev["dominant_emotion"],
"to_emotion": curr["dominant_emotion"],
"context": curr["text_preview"]
})
return {
"timeline": timeline,
"shifts": shifts,
"opening_sentiment": timeline[0]["sentiment"] if timeline else 0,
"closing_sentiment": timeline[-1]["sentiment"] if timeline else 0
}
# Example usage
analysis = analyze_emotion_timeline(123)
print(f"Opening sentiment: {analysis['opening_sentiment']:.2f}")
print(f"Closing sentiment: {analysis['closing_sentiment']:.2f}")
print(f"\nSignificant emotional shifts:")
for shift in analysis["shifts"]:
minutes = shift["timestamp_ms"] / 60000
print(f" {minutes:.1f}m: {shift['direction']} shift - {shift['context']}")Emotion Insights
- Opening vs closing sentiment comparison
- Identification of emotional turning points
- Speaker-specific emotion patterns (sketched below)
- Correlation with call outcomes
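Speaker-specific patterns drop out of the same timeline. A short sketch averaging sentiment by speaker from the result of analyze_emotion_timeline:

def sentiment_by_speaker(timeline):
    """Average sentiment per speaker across the call."""
    by_speaker = {}
    for entry in timeline:
        by_speaker.setdefault(entry["speaker"], []).append(entry["sentiment"])
    return {spk: sum(vals) / len(vals) for spk, vals in by_speaker.items()}

for speaker, avg in sentiment_by_speaker(analysis["timeline"]).items():
    print(f"{speaker}: {avg:+.2f} average sentiment")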
Bulk Processing Pipeline
Context
Organizations with high call volumes need efficient batch processing. This pattern handles uploading many files and retrieving results at scale.
Implementation Pattern
- Upload in Batches - Send multiple files with rate limiting
- Track Progress - Monitor processing status for all recordings
- Retrieve in Parallel - Fetch results as recordings complete
- Handle Failures - Retry failed transcriptions
Example: Batch Processing System
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
def upload_recording(file_path):
"""Upload a single recording."""
with open(file_path, "rb") as f:
response = requests.post(
f"{BASE_URL}/recordings",
headers=headers,
files={"file": f},
data={"language": "en"}
)
return response.json()["data"]
def wait_for_completion(recording_id, timeout=600):
"""Wait for a recording to complete processing."""
start = time.time()
while time.time() - start < timeout:
response = requests.get(
f"{BASE_URL}/recordings/{recording_id}",
headers=headers
)
recording = response.json()["data"]
if recording["status"] == "completed":
return recording
if recording["status"] == "failed":
raise Exception(f"Recording {recording_id} failed")
time.sleep(5)
raise TimeoutError(f"Recording {recording_id} timed out")
def process_batch(file_paths, max_workers=5):
"""Process a batch of recordings."""
results = {"success": [], "failed": []}
# Upload all files
uploads = []
for path in file_paths:
try:
recording = upload_recording(path)
uploads.append({"path": path, "id": recording["id"]})
print(f"Uploaded {path} -> ID {recording['id']}")
time.sleep(0.5) # Rate limiting
except Exception as e:
results["failed"].append({"path": path, "error": str(e)})
# Wait for completion in parallel
with ThreadPoolExecutor(max_workers=max_workers) as executor:
futures = {
executor.submit(wait_for_completion, u["id"]): u
for u in uploads
}
for future in as_completed(futures):
upload = futures[future]
try:
recording = future.result()
results["success"].append({
"path": upload["path"],
"id": recording["id"],
"duration_ms": recording["duration_ms"]
})
print(f"Completed: {upload['path']}")
except Exception as e:
results["failed"].append({
"path": upload["path"],
"id": upload["id"],
"error": str(e)
})
print(f"Failed: {upload['path']} - {e}")
return results
# Example usage
files = ["call1.mp3", "call2.mp3", "call3.mp3"]
results = process_batch(files)
print(f"\nCompleted: {len(results['success'])}")
print(f"Failed: {len(results['failed'])}")Batch Processing Tips
- Implement exponential backoff for rate limits (see the sketch below)
- Use webhooks instead of polling for large batches
- Store recording IDs for retry handling
- Monitor API quota usage
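For the first tip, a retry wrapper that backs off exponentially on HTTP 429 (and transient 5xx) responses; a sketch, assuming the API signals rate limiting with a 429 status code:

def request_with_backoff(method, url, max_retries=5, **kwargs):
    """Send a request, doubling the wait after each 429/5xx response."""
    delay = 1
    for _ in range(max_retries):
        response = requests.request(method, url, headers=headers, **kwargs)
        if response.status_code not in (429, 500, 502, 503):
            return response
        time.sleep(delay)
        delay *= 2  # 1s, 2s, 4s, ...
    return response

# Drop-in replacement for the status poll in wait_for_completion:
# response = request_with_backoff("GET", f"{BASE_URL}/recordings/{recording_id}")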