# Analyzing Results

Learn how to interpret and use the analysis results from IdentityCall, including goal evaluations, keyword detections, pause analysis, and emotion analysis, and how to combine them into a call report.
## Goal Evaluation
Goals are AI-powered evaluations of whether specific objectives were achieved during the call.
### Understanding Goal Scores
| Score Range | Meaning |
|---|---|
| 0.9 - 1.0 | Strongly achieved |
| 0.7 - 0.9 | Achieved |
| 0.5 - 0.7 | Partially achieved |
| 0.3 - 0.5 | Weakly achieved |
| 0.0 - 0.3 | Not achieved |
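If you need the label rather than the raw number, a small helper like the one below can map a score onto the bands in this table. This is a convenience sketch, not part of the IdentityCall API; the function name and thresholds simply mirror the table above.

```python
def score_label(score: float) -> str:
    """Map a goal score to the qualitative band from the table above."""
    if score >= 0.9:
        return "Strongly achieved"
    if score >= 0.7:
        return "Achieved"
    if score >= 0.5:
        return "Partially achieved"
    if score >= 0.3:
        return "Weakly achieved"
    return "Not achieved"
```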
### Example Goal Analysis
```python
import requests
import os

def analyze_goals(recording_id):
    API_KEY = os.environ.get("IDENTITYCALL_API_KEY")

    response = requests.get(
        f"https://api.identitycall.com/api/v1/public/recordings/{recording_id}/results",
        headers={"Authorization": f"Bearer {API_KEY}"}
    )
    results = response.json()["data"]

    # Calculate overall achievement
    goals = results["goals"]
    total_score = sum(g["score"] for g in goals)
    avg_score = total_score / len(goals) if goals else 0
    met_count = sum(1 for g in goals if g["met"])

    print(f"Goal Achievement: {met_count}/{len(goals)} ({avg_score:.0%})")
    print()

    # Group by achievement
    achieved = [g for g in goals if g["met"]]
    not_achieved = [g for g in goals if not g["met"]]

    if achieved:
        print("✓ Achieved Goals:")
        for goal in achieved:
            print(f" - {goal['goal_name']} (score: {goal['score']:.2f})")
            print(f"   {goal['explanation']}")

    if not_achieved:
        print("\n✗ Not Achieved:")
        for goal in not_achieved:
            print(f" - {goal['goal_name']} (score: {goal['score']:.2f})")
            print(f"   {goal['explanation']}")

    return results
```

## Keyword Detection
Keywords track mentions of important terms, phrases, or compliance language.
### Keyword Sentiment
| Sentiment | Example Keywords |
|---|---|
| `positive` | Thank you, excellent, resolved |
| `negative` | Frustrated, complaint, refund |
| `neutral` | Product names, technical terms |
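For reference, an individual keyword entry in the results payload carries the fields used by the code below: `keyword_name`, `sentiment`, `count`, and `timestamps_ms`. The entry shown here is illustrative and its values are made up.

```python
# Illustrative keyword entry (values are invented); field names match those
# consumed by analyze_keywords() below.
example_keyword = {
    "keyword_name": "refund",
    "sentiment": "negative",
    "count": 3,
    "timestamps_ms": [45200, 118700, 243100],  # millisecond offsets of each mention
}
```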
### Analyzing Keywords
```python
def analyze_keywords(results):
    keywords = results["keywords"]

    # Group by sentiment
    positive = [k for k in keywords if k["sentiment"] == "positive"]
    negative = [k for k in keywords if k["sentiment"] == "negative"]
    neutral = [k for k in keywords if k["sentiment"] == "neutral"]

    print("Keyword Analysis:")

    if positive:
        total_positive = sum(k["count"] for k in positive)
        print(f"\n Positive mentions: {total_positive}")
        for k in positive:
            print(f" - '{k['keyword_name']}': {k['count']} times")

    if negative:
        total_negative = sum(k["count"] for k in negative)
        print(f"\n Negative mentions: {total_negative}")
        for k in negative:
            print(f" - '{k['keyword_name']}': {k['count']} times")
            print(f"   at: {k['timestamps_ms']}")

    # Calculate sentiment ratio
    pos_count = sum(k["count"] for k in positive)
    neg_count = sum(k["count"] for k in negative)
    total = pos_count + neg_count
    if total > 0:
        sentiment_score = pos_count / total
        print(f"\n Sentiment ratio: {sentiment_score:.0%} positive")
```

## Pause Analysis
Pause analysis identifies silent periods that may indicate compliance issues or conversation problems.
### Compliance Rules
Pauses are evaluated against configurable thresholds:
| Duration | Typical Interpretation |
|---|---|
| < 2 seconds | Normal conversation flow |
| 2-5 seconds | Extended pause, usually acceptable |
| 5-10 seconds | Long pause, may need review |
| > 10 seconds | Compliance concern |
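If you want to bucket pauses yourself, a helper along these lines mirrors the table above. The thresholds are the illustrative ones listed here, not fixed API values, and the function is a local sketch rather than part of IdentityCall.

```python
def classify_pause(duration_ms: int) -> str:
    """Bucket a pause duration using the illustrative thresholds above."""
    seconds = duration_ms / 1000
    if seconds < 2:
        return "normal conversation flow"
    if seconds <= 5:
        return "extended pause"
    if seconds <= 10:
        return "long pause, review"
    return "compliance concern"
```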
### Analyzing Pauses
```python
def analyze_pauses(results):
    pauses = results["pauses"]

    if not pauses:
        print("No significant pauses detected")
        return

    compliant = [p for p in pauses if p["compliant"]]
    non_compliant = [p for p in pauses if not p["compliant"]]
    total_pause_time = sum(p["duration_ms"] for p in pauses)

    print("Pause Analysis:")
    print(f" Total pauses: {len(pauses)}")
    print(f" Total pause time: {total_pause_time / 1000:.1f}s")
    print(f" Compliant: {len(compliant)}, Non-compliant: {len(non_compliant)}")

    if non_compliant:
        print("\n ⚠ Non-compliant pauses:")
        for p in non_compliant:
            duration = p["duration_ms"] / 1000
            start = p["start_ms"] / 1000
            print(f" - {duration:.1f}s pause at {start:.1f}s")
```

## Emotion Analysis
Emotions are detected for each dialogue segment in the transcription.
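The code below assumes each dialogue segment carries an `emotion` field mapping emotion names to probabilities, alongside `position`, `speaker`, and `start_ms`. The segment shown here is illustrative; the values and the exact emotion labels may differ from what your account returns.

```python
# Illustrative dialogue segment (values and emotion labels are invented);
# the emotion field is a mapping of emotion name -> probability.
example_dialogue = {
    "position": 4,
    "speaker": "agent",
    "start_ms": 52300,
    "emotion": {"neutral": 0.62, "happy": 0.25, "frustrated": 0.13},
}
```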
### Tracking Emotions Over Time
```python
def analyze_emotions(transcription):
    dialogues = transcription["dialogues"]

    # Track emotion trends
    emotions_over_time = []
    for d in dialogues:
        emotions = d["emotion"]
        # Find dominant emotion
        dominant = max(emotions.items(), key=lambda x: x[1])
        emotions_over_time.append({
            "position": d["position"],
            "speaker": d["speaker"],
            "time_ms": d["start_ms"],
            "dominant_emotion": dominant[0],
            "probability": dominant[1]
        })

    print("Emotion Timeline:")
    for e in emotions_over_time[:10]:  # First 10 segments
        time_sec = e["time_ms"] / 1000
        print(f" [{e['speaker']}] {time_sec:.1f}s: {e['dominant_emotion']} ({e['probability']:.0%})")

    # Calculate overall emotion distribution
    emotion_totals = {}
    for d in dialogues:
        for emotion, prob in d["emotion"].items():
            emotion_totals[emotion] = emotion_totals.get(emotion, 0) + prob

    total = sum(emotion_totals.values())
    print("\nOverall Emotion Distribution:")
    for emotion, value in sorted(emotion_totals.items(), key=lambda x: -x[1]):
        pct = value / total * 100
        if pct > 5:  # Only show significant emotions
            print(f" {emotion}: {pct:.1f}%")
```

## Building Reports
Combine all analyses into a comprehensive report:
```python
import requests

def generate_call_report(recording_id, api_key):
    base_url = "https://api.identitycall.com/api/v1/public"
    headers = {"Authorization": f"Bearer {api_key}"}

    # Fetch all data
    recording = requests.get(f"{base_url}/recordings/{recording_id}", headers=headers).json()["data"]
    transcription = requests.get(f"{base_url}/recordings/{recording_id}/transcription", headers=headers).json()["data"]
    results = requests.get(f"{base_url}/recordings/{recording_id}/results", headers=headers).json()["data"]
    summary = requests.get(f"{base_url}/recordings/{recording_id}/summary", headers=headers).json()["data"]

    # Generate report
    report = {
        "recording_id": recording_id,
        "duration_seconds": recording["duration_ms"] / 1000,
        "speakers": summary["speakers"],
        "summary": summary["summary"],
        "goal_achievement_pct": summary["goal_achievement"],
        "goals": [{
            "name": g["goal_name"],
            "met": g["met"],
            "score": g["score"]
        } for g in results["goals"]],
        "keyword_counts": {k["keyword_name"]: k["count"] for k in results["keywords"]},
        "non_compliant_pauses": len([p for p in results["pauses"] if not p["compliant"]]),
        "dialogue_count": len(transcription["dialogues"])
    }

    return report
```
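A minimal way to run the report end to end might look like this; the recording ID is a placeholder, and the API key is read from the environment as in the earlier examples.

```python
import json
import os

api_key = os.environ.get("IDENTITYCALL_API_KEY")
report = generate_call_report("rec_12345", api_key)  # placeholder recording ID
print(json.dumps(report, indent=2))
```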