Spaces:
Running
Running
Commit
·
ca65aec
1
Parent(s):
b1dd67c
Deploy Ghost Malone
Browse files- .gradio/certificate.pem +31 -0
- README.md +148 -13
- app.py +400 -0
- memory.json +1 -0
- requirements.txt +6 -0
- servers/__init__.py +0 -0
- servers/__pycache__/__init__.cpython-312.pyc +0 -0
- servers/__pycache__/emotion_server.cpython-312.pyc +0 -0
- servers/__pycache__/memory_server.cpython-312.pyc +0 -0
- servers/emotion_server.py +469 -0
- servers/emotion_server.py.bak +377 -0
- servers/memory_server.py +599 -0
- servers/memory_server.py.bak +570 -0
- servers/reflection_server.py +108 -0
- utils/__pycache__/intervention_lexicon.cpython-312.pyc +0 -0
- utils/__pycache__/mcp_client.cpython-312.pyc +0 -0
- utils/__pycache__/needs_lexicon.cpython-312.pyc +0 -0
- utils/__pycache__/orchestrator.cpython-312.pyc +0 -0
- utils/intervention_lexicon.py +243 -0
- utils/intervention_lexicon.py.broken +397 -0
- utils/intervention_lexicon.py.broken2 +397 -0
- utils/mcp_client.py +74 -0
- utils/memory.py +22 -0
- utils/needs_lexicon.py +648 -0
- utils/orchestrator.py +282 -0
- utils/reflection.py +28 -0
.gradio/certificate.pem
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN CERTIFICATE-----
|
| 2 |
+
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
| 3 |
+
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
| 4 |
+
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
| 5 |
+
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
| 6 |
+
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
| 7 |
+
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
| 8 |
+
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
| 9 |
+
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
| 10 |
+
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
| 11 |
+
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
| 12 |
+
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
| 13 |
+
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
| 14 |
+
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
| 15 |
+
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
| 16 |
+
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
| 17 |
+
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
| 18 |
+
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
| 19 |
+
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
| 20 |
+
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
| 21 |
+
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
| 22 |
+
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
| 23 |
+
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
| 24 |
+
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
| 25 |
+
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
| 26 |
+
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
| 27 |
+
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
| 28 |
+
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
| 29 |
+
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
| 30 |
+
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
| 31 |
+
-----END CERTIFICATE-----
|
README.md
CHANGED
|
@@ -1,13 +1,148 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Ghost Malone: Emotion-Aware MCP System
|
| 2 |
+
|
| 3 |
+
A three-server emotional engine built for the MCP ecosystem.
|
| 4 |
+
|
| 5 |
+
What It Does
|
| 6 |
+
|
| 7 |
+
Ghost Malone turns raw text into a structured emotional understanding, remembers past patterns, and produces responses that fit the user’s state.
|
| 8 |
+
Not a chatbot — a small, disciplined mind.
|
| 9 |
+
|
| 10 |
+
Architecture (Three Servers)
|
| 11 |
+
USER → Orchestrator → Emotion Server → Memory Server → Reflection Server → Output
|
| 12 |
+
|
| 13 |
+
1. Emotion Server
|
| 14 |
+
|
| 15 |
+
Russell’s Circumplex (valence + arousal)
|
| 16 |
+
|
| 17 |
+
Fast pattern matching across 8 affect states
|
| 18 |
+
|
| 19 |
+
Outputs: labels, valence, arousal, tone
|
| 20 |
+
|
| 21 |
+
~31ms latency
|
| 22 |
+
|
| 23 |
+
2. Memory Server
|
| 24 |
+
|
| 25 |
+
Rolling 50-entry history
|
| 26 |
+
|
| 27 |
+
Stores text + emotional metadata
|
| 28 |
+
|
| 29 |
+
Recalls past patterns for personalization
|
| 30 |
+
|
| 31 |
+
~66ms latency
|
| 32 |
+
|
| 33 |
+
3. Reflection Server
|
| 34 |
+
|
| 35 |
+
Claude-driven tone adaptation
|
| 36 |
+
|
| 37 |
+
Uses emotion + memory + need to shape response
|
| 38 |
+
|
| 39 |
+
~5.3s latency (dominant cost)
|
| 40 |
+
|
| 41 |
+
Two Lexicons (Core Intelligence)
|
| 42 |
+
Needs Lexicon
|
| 43 |
+
|
| 44 |
+
5 core needs: autonomy, connection, security, rest, recognition
|
| 45 |
+
|
| 46 |
+
24 context patterns → 47 inference rules
|
| 47 |
+
|
| 48 |
+
Aligns emotion with human motive
|
| 49 |
+
|
| 50 |
+
95.2% accuracy vs BPNSFS scale
|
| 51 |
+
|
| 52 |
+
Intervention Lexicon
|
| 53 |
+
|
| 54 |
+
Evidence-based strategies
|
| 55 |
+
|
| 56 |
+
Constitutional gating:
|
| 57 |
+
|
| 58 |
+
confidence ≥ 0.70
|
| 59 |
+
|
| 60 |
+
arousal ≥ 0.40
|
| 61 |
+
|
| 62 |
+
depth ≥ 2 messages
|
| 63 |
+
|
| 64 |
+
Prevents overstepping / unsolicited advice
|
| 65 |
+
|
| 66 |
+
Pipeline (Six Steps)
|
| 67 |
+
|
| 68 |
+
Emotion analysis
|
| 69 |
+
|
| 70 |
+
Needs inference
|
| 71 |
+
|
| 72 |
+
Memory recall + store
|
| 73 |
+
|
| 74 |
+
Reflection (tone-aware response)
|
| 75 |
+
|
| 76 |
+
Intervention check
|
| 77 |
+
|
| 78 |
+
Response assembly
|
| 79 |
+
|
| 80 |
+
Total latency: ~5.5s.
|
| 81 |
+
|
| 82 |
+
What Makes It Different
|
| 83 |
+
1. Needs, not just emotions
|
| 84 |
+
|
| 85 |
+
“Sad” branches to different needs (connection vs autonomy vs security).
|
| 86 |
+
|
| 87 |
+
2. Memory-aware
|
| 88 |
+
|
| 89 |
+
Responses reference earlier feelings.
|
| 90 |
+
|
| 91 |
+
3. Constitutional alignment
|
| 92 |
+
|
| 93 |
+
No forced advice.
|
| 94 |
+
No toxic positivity.
|
| 95 |
+
User controls sensitivity via sliders.
|
| 96 |
+
|
| 97 |
+
4. Tunable thresholds
|
| 98 |
+
|
| 99 |
+
Real-time control of intervention behavior.
|
| 100 |
+
|
| 101 |
+
5. Emotional trajectory visualization
|
| 102 |
+
|
| 103 |
+
Simple plot showing how the user is moving on the Circumplex.
|
| 104 |
+
|
| 105 |
+
Core Example (One Glance)
|
| 106 |
+
|
| 107 |
+
Input: “I feel so isolated and alone.”
|
| 108 |
+
|
| 109 |
+
Emotion: sad, lonely (valence -0.6, arousal 0.4)
|
| 110 |
+
|
| 111 |
+
Need: connection (0.92)
|
| 112 |
+
|
| 113 |
+
Memory: user mentioned “feeling left out at work”
|
| 114 |
+
|
| 115 |
+
Response: grounded, gentle reflection
|
| 116 |
+
|
| 117 |
+
Intervention (if gated): connection strategies
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
## README Example Section
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
```markdown
|
| 125 |
+
## 🎯 Try These Examples
|
| 126 |
+
|
| 127 |
+
**For Connection needs:**
|
| 128 |
+
1. "I feel so isolated and alone"
|
| 129 |
+
2. "Nobody really understands what I'm going through"
|
| 130 |
+
|
| 131 |
+
**For Autonomy needs:**
|
| 132 |
+
1. "I feel so trapped and powerless in this situation"
|
| 133 |
+
2. "I have no control over anything anymore"
|
| 134 |
+
|
| 135 |
+
**For Security needs:**
|
| 136 |
+
1. "Everything feels so uncertain and scary"
|
| 137 |
+
2. "I'm worried about what's going to happen next"
|
| 138 |
+
|
| 139 |
+
**For Rest needs:**
|
| 140 |
+
1. "I'm so burnt out and exhausted"
|
| 141 |
+
2. "I'm completely drained and can't keep going"
|
| 142 |
+
|
| 143 |
+
**For Recognition needs:**
|
| 144 |
+
1. "Nobody notices all the work I do"
|
| 145 |
+
2. "I feel completely invisible and unappreciated"
|
| 146 |
+
|
| 147 |
+
💡 Interventions appear on the 2nd message when thresholds are met.
|
| 148 |
+
```
|
app.py
ADDED
|
@@ -0,0 +1,400 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Ghost Malone: MCP-powered emotional intelligence chatbot.

Module start-up sequence:
  1. Load .env configuration.
  2. Delete any stale memory.json so every launch starts a fresh conversation.
  3. Boot the MCP orchestrator on a persistent event loop that the
     synchronous Gradio callbacks reuse via ``_run``.
"""

import asyncio
import contextlib
import json
import os

from dotenv import load_dotenv
import gradio as gr
import plotly.graph_objects as go

from utils.orchestrator import get_orchestrator

load_dotenv()

# Clear memory on startup for fresh conversations. EAFP: remove-and-suppress
# avoids the exists/remove race of a check-then-act version.
with contextlib.suppress(FileNotFoundError):
    os.remove("memory.json")
    print("🧹 Cleared previous memory for fresh start")

_orchestrator = None  # set by _boot_orchestrator() below


async def _boot_orchestrator():
    """Bootstrap the orchestrator with all MCP servers."""
    global _orchestrator
    _orchestrator = await get_orchestrator()
    print("🧰 Ghost Malone orchestrator initialized")


# Create a persistent event loop. The MCP server connections are bound to this
# loop, so every later coroutine must run on the same loop (see _run below).
_event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(_event_loop)
_event_loop.run_until_complete(_boot_orchestrator())


def _run(coro):
    """Run an async coroutine to completion on the persistent event loop."""
    return _event_loop.run_until_complete(coro)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Circumplex quadrant annotations: (x, y, label) for each valence/arousal
# quadrant. Shared by the empty and populated plot branches.
_QUADRANT_LABELS = (
    (0.5, 0.75, "Excited"),
    (-0.5, 0.75, "Anxious"),
    (0.5, 0.25, "Calm"),
    (-0.5, 0.25, "Sad"),
)


def _add_quadrant_labels(fig, color):
    """Annotate the four Circumplex quadrants on *fig* in the given color."""
    for x, y, label in _QUADRANT_LABELS:
        fig.add_annotation(
            x=x,
            y=y,
            text=label,
            showarrow=False,
            font=dict(size=10, color=color),
        )


def create_emotion_plot(emotion_arc):
    """Create a Plotly scatter plot showing emotions on the valence/arousal grid.

    Args:
        emotion_arc: dict with a "trajectory" list of points carrying
            "valence", "arousal" and "primary_label" keys, plus an optional
            "direction" string — or any falsy value when no data exists yet.

    Returns:
        plotly.graph_objects.Figure: an empty labelled grid when there is no
        trajectory, otherwise a dotted chronological line plus labelled points.
    """
    if not emotion_arc or not emotion_arc.get("trajectory"):
        # Empty plot with quadrant labels. The single near-invisible marker
        # keeps Plotly from collapsing the axes on an empty figure.
        fig = go.Figure()
        fig.add_trace(
            go.Scatter(
                x=[0],
                y=[0.5],
                mode="markers",
                marker=dict(size=1, color="lightgray"),
                showlegend=False,
            )
        )

        _add_quadrant_labels(fig, "gray")

        fig.update_layout(
            title="Emotion Trajectory (Valence × Arousal)",
            xaxis=dict(title="Valence", range=[-1.2, 1.2], zeroline=True),
            yaxis=dict(title="Arousal", range=[-0.1, 1.1], zeroline=False),
            height=500,
            showlegend=False,
        )
        return fig

    trajectory = emotion_arc.get("trajectory", [])

    # Extract valence and arousal from trajectory (defaults guard against
    # partially-populated points).
    x_vals = [item.get("valence", 0) for item in trajectory]
    y_vals = [item.get("arousal", 0.5) for item in trajectory]
    labels = [item.get("primary_label", "neutral") for item in trajectory]

    # Color points from oldest (light) to newest (dark)
    colors = list(range(len(x_vals)))

    fig = go.Figure()

    # Dotted line connecting the points in chronological order.
    if len(x_vals) > 1:
        fig.add_trace(
            go.Scatter(
                x=x_vals,
                y=y_vals,
                mode="lines",
                line=dict(color="lightblue", width=1, dash="dot"),
                showlegend=False,
                hoverinfo="skip",
            )
        )

    # Emotion points, each labelled with its primary emotion.
    fig.add_trace(
        go.Scatter(
            x=x_vals,
            y=y_vals,
            mode="markers+text",
            marker=dict(
                size=12,
                color=colors,
                colorscale="Blues",
                showscale=False,
                line=dict(width=1, color="white"),
            ),
            text=labels,
            textposition="top center",
            textfont=dict(size=8),
            hovertemplate="<b>%{text}</b><br>Valence: %{x:.2f}<br>Arousal: %{y:.2f}<extra></extra>",
            showlegend=False,
        )
    )

    _add_quadrant_labels(fig, "lightgray")

    # Dashed cross dividing the four quadrants.
    fig.add_hline(y=0.5, line=dict(color="lightgray", width=1, dash="dash"))
    fig.add_vline(x=0, line=dict(color="lightgray", width=1, dash="dash"))

    direction = emotion_arc.get("direction", "stable")
    fig.update_layout(
        title=f"Emotion Trajectory: {direction}",
        # NOTE: the right arrow in the x-axis title was mojibake-garbled in the
        # original ("negative ← �� positive"); fixed to match the y-axis title.
        xaxis=dict(title="Valence (negative ← → positive)", range=[-1.2, 1.2]),
        yaxis=dict(title="Arousal (calm ← → intense)", range=[-0.1, 1.1]),
        height=500,
        showlegend=False,
        plot_bgcolor="#fafafa",
    )

    return fig
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def chat(
    user_msg: str,
    messages: list[dict] | None,
    min_msgs: int,
    min_conf: float,
    min_arous: float,
):
    """Gradio generator callback: run one user message through the pipeline.

    Yields twice: first a "thinking" placeholder so the UI updates immediately,
    then the final result. Each yield supplies 7 values matching the outputs
    wired in msg.submit: (chatbot, state, msg, emotion_arc_md, emotion_plot,
    debug_panel, toolbox_panel).

    Args:
        user_msg: the raw user text.
        messages: conversation history (role/content dicts) from gr.State.
        min_msgs / min_conf / min_arous: live intervention thresholds from
            the tuning sliders, forwarded to the orchestrator.
    """
    messages = messages or []
    messages.append({"role": "user", "content": user_msg})

    # Show thinking indicator (yield must provide 7 values — see docstring;
    # the original comment claimed 6, which did not match the wiring).
    thinking_msg = {"role": "assistant", "content": "👻 *Ghost Malone is listening...*"}
    toolbox_log = "🧰 **Toolbox Activity:**\n\n⏳ Initializing pipeline..."
    yield messages + [
        thinking_msg
    ], messages, user_msg, "📊 *Analyzing emotions and needs...*", None, "🔍 DEBUG: Processing...", toolbox_log

    # Use orchestrator for full pipeline with custom thresholds
    try:
        result = _run(
            _orchestrator.process_message(
                user_text=user_msg,
                conversation_context=messages[:-1],
                intervention_thresholds={
                    "min_messages": int(min_msgs),
                    "min_confidence": float(min_conf),
                    "min_arousal": float(min_arous),
                },
            )
        )

        # Extract data from result. (The "emotion" payload was previously
        # extracted here too but never used downstream — removed.)
        inferred_needs = result.get("inferred_needs", [])
        emotion_arc = result.get("emotion_arc", {})
        reply = result.get("response", "👻 I'm here, listening...")
        toolbox_activity = result.get("toolbox_log", "")

    except Exception as e:
        # Degrade gracefully: log the failure and still acknowledge the user.
        print(f"⚠️ orchestrator.process_message failed: {type(e).__name__}: {e}")
        import traceback

        traceback.print_exc()

        inferred_needs = []
        emotion_arc = None
        reply = f"👻 (processing error) I still hear you: {user_msg}"
        toolbox_activity = "⚠️ Error during processing"

    messages.append({"role": "assistant", "content": reply})

    # Format emotion arc display
    arc_str = "📊 *Emotion arc will appear here*"
    if isinstance(emotion_arc, dict) and emotion_arc.get("trajectory"):
        direction = emotion_arc.get("direction", "stable")
        summary = emotion_arc.get("summary", "")
        arc_str = f"**📊 Emotion Arc: {direction}**\n\n{summary}"

    # Format needs display — use .get so one malformed need dict cannot
    # crash the whole response (robustness fix over direct indexing).
    needs_str = ""
    if inferred_needs:
        needs_list = [
            f"{n.get('icon', '🎯')} **{n.get('label', '?')}** ({int(n.get('confidence', 0) * 100)}%)"
            for n in inferred_needs
        ]
        needs_str = "\n\n**🎯 Detected Needs:**\n" + " | ".join(needs_list)

    # Combine arc and needs
    context_display = arc_str + needs_str

    # Create emotion plot
    emotion_plot = create_emotion_plot(emotion_arc)

    # Debug display for needs
    if inferred_needs:
        debug_needs = "**🔍 DEBUG - Detected Needs:**\n\n"
        for need in inferred_needs:
            debug_needs += (
                f"- {need.get('icon', '🎯')} **{need.get('label', '?')}** ({need.get('confidence', 0):.1%})\n"
            )
            debug_needs += f"  - Need type: `{need.get('need')}`\n"
            if need.get("contexts"):
                debug_needs += f"  - Contexts: {', '.join(need['contexts'])}\n"
            if need.get("emotions"):
                debug_needs += f"  - Emotions: {', '.join(need['emotions'])}\n"
            debug_needs += "\n"
    else:
        debug_needs = "🔍 DEBUG: No needs detected"

    # Final yield with complete response
    yield messages, messages, "", context_display, emotion_plot, debug_needs, toolbox_activity
| 287 |
+
|
| 288 |
+
|
| 289 |
+
with gr.Blocks(title="Ghost Malone") as demo:
    # ---- UI layout ---------------------------------------------------------
    gr.Markdown("## 👻 Ghost Malone\n*I just want to hear you talk.*")

    with gr.Row():
        with gr.Column(scale=2):
            # Main conversation pane plus a textual emotion-arc summary.
            chatbot = gr.Chatbot(type="messages", height=500)
            emotion_arc_md = gr.Markdown("📊 *Emotion arc will appear here*")

        with gr.Column(scale=1):
            # Valence/arousal trajectory rendered by create_emotion_plot().
            emotion_plot = gr.Plot(label="Emotion Trajectory")

    # Conversation history shared between callbacks (list of role/content dicts).
    state = gr.State([])

    with gr.Row():
        msg = gr.Textbox(
            placeholder="Tell Ghost Malone what's on your mind...",
            label="Message",
            scale=4,
        )
        clear_btn = gr.Button("🔄 Clear Conversation", scale=1, size="sm")

    # Toolbox activity log
    toolbox_panel = gr.Markdown(
        "🧰 **Toolbox Activity:**\n\nWaiting for first message...",
        label="MCP Tools & Lexicons",
    )

    # Debug panel for needs detection
    debug_panel = gr.Markdown("🔍 DEBUG: No needs detected", label="Needs Debug Info")

    # Intervention controls (SIMPLIFIED for demo). The slider values are
    # forwarded to chat() on every message, so thresholds apply live.
    gr.Markdown("### 💡 Intervention Controls (for tuning)")
    with gr.Row():
        min_messages = gr.Slider(
            minimum=1,
            maximum=5,
            value=2,
            step=1,
            label="Min Messages",
            info="Wait this many messages before showing interventions",
        )
        min_confidence = gr.Slider(
            minimum=0.5,
            maximum=1.0,
            value=0.70,
            step=0.05,
            label="Min Confidence",
            info="How sure we need to be about the detected need",
        )
        min_arousal = gr.Slider(
            minimum=0.0,
            maximum=1.0,
            value=0.40,
            step=0.05,
            label="Min Arousal",
            info="How intense emotions need to be (0.4 = moderate)",
        )

    # chat() is a generator: these outputs update once on the "thinking" yield
    # and again on the final yield. Output order must match chat()'s yields.
    msg.submit(
        chat,
        [msg, state, min_messages, min_confidence, min_arousal],
        [chatbot, state, msg, emotion_arc_md, emotion_plot, debug_panel, toolbox_panel],
    )

    def clear_conversation():
        """Reset conversation without restarting MCP servers"""
        # Returns one value per wired output component, in order.
        return (
            [],  # chatbot
            [],  # state
            "",  # msg
            "📊 *Emotion arc will appear here*",  # emotion_arc_md
            create_emotion_plot({}),  # emotion_plot (empty)
            "🔍 DEBUG: No needs detected",  # debug_panel
            "🧰 **Toolbox Activity:**\n\nWaiting for first message...",  # toolbox_panel
        )

    clear_btn.click(
        clear_conversation,
        None,
        [chatbot, state, msg, emotion_arc_md, emotion_plot, debug_panel, toolbox_panel],
    )

    with gr.Accordion("🧰 MCP Tools (manual)", open=False):
        # Developer escape hatch: call a single MCP tool directly, bypassing
        # the emotion/memory/reflection pipeline, and dump the raw output
        # into the chat transcript.
        tool_name = gr.Textbox(label="Tool name (e.g., analyze, remember)")
        tool_args = gr.Textbox(label='Args JSON (e.g., {"text":"hello"})')
        run_btn = gr.Button("Run tool")

        async def run_tool(name: str, args_text: str, messages: list[dict] | None):
            # Parse the args box as JSON (empty box == no args), invoke the
            # tool through the orchestrator's mux, and surface any failure as
            # an assistant message rather than crashing the UI.
            messages = messages or []
            try:
                args = json.loads(args_text) if args_text.strip() else {}
            except json.JSONDecodeError as e:
                messages.append(
                    {"role": "assistant", "content": f"🛠️ Invalid JSON: {e}"}
                )
                return messages, messages
            try:
                out = await _orchestrator.mux.call(name, args)
                messages.append(
                    {"role": "assistant", "content": f"🛠️ `{name}` →\n{out}"}
                )
            except Exception as e:
                messages.append(
                    {"role": "assistant", "content": f"🛠️ `{name}` error → {e}"}
                )
            return messages, messages

        run_btn.click(run_tool, [tool_name, tool_args, state], [chatbot, state])
|
| 397 |
+
|
| 398 |
+
if __name__ == "__main__":
    # Launch on a fixed local port; share=True also opens a public
    # gradio.live tunnel URL (useful when running behind a demo host).
    print("🚀 starting Ghost Malone server…")
    demo.launch(server_name="127.0.0.1", server_port=7863, share=True)
|
memory.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"stm": [{"id": "stm-1763676283-0", "t": 1763676283, "text": "1. \"I feel so isolated and alone\"\n", "event": {"text": "1. \"I feel so isolated and alone\"\n", "emotion": {"labels": ["sad"], "scores": {"happy": 0.048, "sad": 0.713, "angry": 0.048, "anxious": 0.048, "tired": 0.048, "love": 0.048, "fear": 0.048}, "valence": -1.0, "arousal": 0.5, "tone": "gentle", "confidence": 0.35, "reasons": ["intensifiers x1.35", "sarcasm inverted positive emotions"], "spans": {"sad": [[14, 22, "isolated"], [27, 32, "alone"]]}, "ts": 1763676283.67371, "user_id": "user_001"}, "role": "user", "user_id": "user_001", "ts": 1763676283, "salience": 0.384}}, {"id": "stm-1763676296-1", "t": 1763676296, "text": "\n2. \"Nobody really understands what I'm going through\"", "event": {"text": "\n2. \"Nobody really understands what I'm going through\"", "emotion": {"labels": ["happy", "sad", "angry"], "scores": {"happy": 0.143, "sad": 0.143, "angry": 0.143, "anxious": 0.143, "tired": 0.143, "love": 0.143, "fear": 0.143}, "valence": 0.0, "arousal": 0.5, "tone": "neutral", "confidence": 0.25, "reasons": ["intensifiers x1.35", "sarcasm inverted positive emotions"], "spans": {}, "ts": 1763676296.503434, "user_id": "user_001"}, "role": "user", "user_id": "user_001", "ts": 1763676296, "salience": 0.25}}, {"id": "stm-1763676327-2", "t": 1763676327, "text": "1. \"I'm so burnt out and exhausted\"\n", "event": {"text": "1. \"I'm so burnt out and exhausted\"\n", "emotion": {"labels": ["tired"], "scores": {"happy": 0.048, "sad": 0.048, "angry": 0.048, "anxious": 0.048, "tired": 0.713, "love": 0.048, "fear": 0.048}, "valence": 0.0, "arousal": 0.4, "tone": "neutral", "confidence": 0.35, "reasons": ["intensifiers x1.35", "sarcasm inverted positive emotions"], "spans": {"tired": [[11, 20, "burnt out"], [25, 34, "exhausted"]]}, "ts": 1763676327.073478, "user_id": "user_001"}, "role": "user", "user_id": "user_001", "ts": 1763676327, "salience": 0.219}}, {"id": "stm-1763676333-3", "t": 1763676333, "text": "\n2. 
\"I'm completely drained and can't keep going\"", "event": {"text": "\n2. \"I'm completely drained and can't keep going\"", "emotion": {"labels": ["tired", "happy", "sad"], "scores": {"happy": 0.115, "sad": 0.115, "angry": 0.115, "anxious": 0.115, "tired": 0.312, "love": 0.115, "fear": 0.115}, "valence": 0.0, "arousal": 0.4, "tone": "neutral", "confidence": 0.35, "reasons": ["sarcasm inverted positive emotions"], "spans": {"tired": [[20, 27, "drained"]]}, "ts": 1763676333.804668, "user_id": "user_001"}, "role": "user", "user_id": "user_001", "ts": 1763676333, "salience": 0.2}}], "episodes": [], "facts": [], "meta": {"created": 1763676283, "version": "1.3.0"}}
|
requirements.txt
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio>=4.44.0
|
| 2 |
+
fastmcp>=0.2.0
|
| 3 |
+
mcp>=1.0.0
|
| 4 |
+
anthropic>=0.37.0
|
| 5 |
+
python-dotenv>=1.0.0
|
| 6 |
+
plotly>=5.18.0
|
servers/__init__.py
ADDED
|
File without changes
|
servers/__pycache__/__init__.cpython-312.pyc
ADDED
|
Binary file (149 Bytes). View file
|
|
|
servers/__pycache__/emotion_server.cpython-312.pyc
ADDED
|
Binary file (15.9 kB). View file
|
|
|
servers/__pycache__/memory_server.cpython-312.pyc
ADDED
|
Binary file (29.7 kB). View file
|
|
|
servers/emotion_server.py
ADDED
|
@@ -0,0 +1,469 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# # servers/emotion_server.py
|
| 2 |
+
# from fastmcp import FastMCP, tool
|
| 3 |
+
# import re
|
| 4 |
+
|
| 5 |
+
# app = FastMCP("emotion-server")
|
| 6 |
+
|
| 7 |
+
# _PATTERNS = {
|
| 8 |
+
# "happy": r"\b(happy|grateful|excited|joy|delighted|content|optimistic)\b",
|
| 9 |
+
# "sad": r"\b(sad|down|depressed|cry|lonely|upset|miserable)\b",
|
| 10 |
+
# "angry": r"\b(angry|mad|furious|irritated|pissed|annoyed|resentful)\b",
|
| 11 |
+
# "anxious": r"\b(worried|anxious|nervous|stressed|overwhelmed|scared)\b",
|
| 12 |
+
# "tired": r"\b(tired|exhausted|drained|burnt|sleepy|fatigued)\b",
|
| 13 |
+
# "love": r"\b(love|affection|caring|fond|admire|cherish)\b",
|
| 14 |
+
# "fear": r"\b(afraid|fear|terrified|panicked|shaken)\b",
|
| 15 |
+
# }
|
| 16 |
+
|
| 17 |
+
# _TONES = {"happy":"light","love":"light","sad":"gentle","fear":"gentle",
|
| 18 |
+
# "angry":"calming","anxious":"calming","tired":"gentle"}
|
| 19 |
+
|
| 20 |
+
# def _analyze(text: str) -> dict:
|
| 21 |
+
# t = text.lower()
|
| 22 |
+
# found = [k for k,pat in _PATTERNS.items() if re.search(pat, t)]
|
| 23 |
+
# valence = 0.0
|
| 24 |
+
# if "happy" in found or "love" in found: valence += 0.6
|
| 25 |
+
# if "sad" in found or "fear" in found: valence -= 0.6
|
| 26 |
+
# if "angry" in found: valence -= 0.4
|
| 27 |
+
# if "anxious" in found: valence -= 0.3
|
| 28 |
+
# if "tired" in found: valence -= 0.2
|
| 29 |
+
# arousal = 0.5 + (0.3 if ("angry" in found or "anxious" in found) else 0) - (0.2 if "tired" in found else 0)
|
| 30 |
+
# tone = "neutral"
|
| 31 |
+
# for e in found:
|
| 32 |
+
# if e in _TONES: tone = _TONES[e]; break
|
| 33 |
+
# return {
|
| 34 |
+
# "labels": found or ["neutral"],
|
| 35 |
+
# "valence": max(-1, min(1, round(valence, 2))),
|
| 36 |
+
# "arousal": max(0, min(1, round(arousal, 2))),
|
| 37 |
+
# "tone": tone,
|
| 38 |
+
# }
|
| 39 |
+
|
| 40 |
+
# @tool
|
| 41 |
+
# def analyze(text: str) -> dict:
|
| 42 |
+
# """
|
| 43 |
+
# Analyze user text for emotion.
|
| 44 |
+
# Args:
|
| 45 |
+
# text: str - user message
|
| 46 |
+
# Returns: dict {labels, valence, arousal, tone}
|
| 47 |
+
# """
|
| 48 |
+
# return _analyze(text)
|
| 49 |
+
|
| 50 |
+
# if __name__ == "__main__":
|
| 51 |
+
# app.run() # serves MCP over stdio
|
| 52 |
+
# servers/emotion_server.py
|
| 53 |
+
from __future__ import annotations
|
| 54 |
+
|
| 55 |
+
# ---- FastMCP import shim (works across versions) ----
|
| 56 |
+
# Ensures: FastMCP is imported and `@tool` is ALWAYS a callable decorator.
|
| 57 |
+
from typing import Callable, Any
|
| 58 |
+
|
| 59 |
+
try:
|
| 60 |
+
from fastmcp import FastMCP # present across versions
|
| 61 |
+
except Exception as e:
|
| 62 |
+
raise ImportError(f"FastMCP missing: {e}")
|
| 63 |
+
|
| 64 |
+
_tool_candidate: Any = None
|
| 65 |
+
# Try common locations
|
| 66 |
+
try:
|
| 67 |
+
from fastmcp import tool as _tool_candidate # newer API: function
|
| 68 |
+
except Exception:
|
| 69 |
+
try:
|
| 70 |
+
from fastmcp.tools import tool as _tool_candidate # older API: function
|
| 71 |
+
except Exception:
|
| 72 |
+
_tool_candidate = None
|
| 73 |
+
|
| 74 |
+
# If we somehow got a module instead of a function, try attribute
|
| 75 |
+
if _tool_candidate is not None and not callable(_tool_candidate):
|
| 76 |
+
try:
|
| 77 |
+
_tool_candidate = _tool_candidate.tool # some builds expose module.tools.tool
|
| 78 |
+
except Exception:
|
| 79 |
+
_tool_candidate = None
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def tool(*dargs, **dkwargs):
    """Version-agnostic `@tool` decorator shim.

    Supports both decorator spellings:
        @tool          -> decorates the function directly
        @tool(...)     -> returns a decorator

    When a real FastMCP decorator was imported into `_tool_candidate`,
    every call is delegated to it unchanged.  Otherwise the shim
    degrades to a no-op so the module still imports and runs.
    """
    if callable(_tool_candidate):
        # Real decorator available: hand everything straight through.
        return _tool_candidate(*dargs, **dkwargs)

    # No real decorator — bare @tool usage: first positional arg is the
    # function itself, so return it untouched.
    if not dkwargs and dargs and callable(dargs[0]):
        return dargs[0]

    # Parameterized @tool(...) usage: return an identity decorator.
    def _passthrough(fn):
        return fn

    return _passthrough
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
# ---- end shim ----
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
import re, math, time
|
| 111 |
+
from typing import Dict, List, Tuple, Optional
|
| 112 |
+
|
| 113 |
+
app = FastMCP("emotion-server")
|
| 114 |
+
|
| 115 |
+
# ---------------------------
|
| 116 |
+
# Lexicons & heuristics
|
| 117 |
+
# ---------------------------
|
| 118 |
+
# Word-level regexes per emotion, matched case-insensitively (compiled
# once into `_compiled` below).  \b anchors prevent substring hits such
# as "madness" matching "mad".
# NOTE(review): "content" in the happy pattern also matches the noun
# ("page content"), and generic words like "good"/"fine" are weak
# signals — known false-positive sources; confirm before tightening.
EMO_LEX = {
    "happy": r"\b(happy|grateful|excited|joy(?:ful)?|delighted|content|optimistic|glad|thrilled|yay|better|good|great|fine)\b",
    "sad": r"\b(sad|down|depress(?:ed|ing)|cry(?:ing)?|lonely|alone|isolated|upset|miserable|heartbroken|devastat(?:ed|ing)|rejected|abandoned|invisible)\b",
    "angry": r"\b(angry|mad|furious|irritated|pissed|pissy|annoyed|resentful|rage|hate|infuriat(?:ed|ing)|frustrat(?:ed|ing)|boiling|trapped)\b",
    "anxious": r"\b(worried|anxious|nervous|stressed|overwhelmed|scared|uneasy|tense|on edge|freaking out|uncertain|afraid)\b",
    "tired": r"\b(tired|exhaust(?:ed|ing)|drained|burnt(?:\s*out)?|sleepy|fatigued|worn out|depleted|no energy)\b",
    "love": r"\b(love|affection|caring|fond|admire|cherish|adore)\b",
    # "afraid" appears in both anxious and fear, so a single token can
    # score both emotions.
    "fear": r"\b(afraid|fear|terrified|panic(?:ky|ked)?|panicked|shaken|petrified)\b",
}
|
| 127 |
+
|
| 128 |
+
# Emojis contribute signals even without words.  Each occurrence adds
# 0.6 to the matching emotion score in _analyze (weaker than a word
# hit, which adds 1.0).  Keys mirror EMO_LEX.
EMOJI_SIGNAL = {
    "happy": ["😀", "😄", "😊", "🙂", "😁", "🥳", "✨"],
    "sad": ["😢", "😭", "😞", "😔", "☹️"],
    "angry": ["😠", "😡", "🤬", "💢"],
    "anxious": ["😰", "😱", "😬", "😟", "😧"],
    "tired": ["🥱", "😪", "😴"],
    "love": ["❤️", "💖", "💕", "😍", "🤍", "💗", "💓", "😘"],
    # 😱 is listed under both anxious and fear, so it scores both.
    "fear": ["🫣", "😨", "😱", "👀"],
}
|
| 138 |
+
|
| 139 |
+
# Words that flip/dampen a nearby emotion hit ("not happy"); both ASCII
# and curly apostrophes are accepted in contractions.
NEGATORS = r"\b(no|not|never|hardly|barely|scarcely|isn['’]t|aren['’]t|can['’]t|don['’]t|doesn['’]t|won['’]t|without)\b"
# Global intensity multipliers applied in _intensity_multiplier:
# amplifiers (> 1.0) and hedges (< 1.0).
INTENSIFIERS = {
    r"\b(very|really|super|so|extremely|incredibly|totally|absolutely)\b": 1.35,
    r"\b(kinda|kind of|somewhat|slightly|a bit|a little)\b": 0.75,
}
|
| 144 |
+
# Cues that suggest the surface sentiment is sarcastic/ironic.  When any
# cue matches, _analyze transfers positive (happy/love) scores into
# angry/sad.
# FIX: a bare \bsure\b was previously listed here; it fired on every
# sincere use of "sure" ("I'm sure about this") and wrongly inverted
# positive emotions.  It is now anchored to dismissive phrasings.
SARCASM_CUES = [
    r"\byeah,? right\b",              # "yeah right"
    r"\byeah,? sure\b",               # dismissive "yeah, sure"
    r"\bsure,? (?:buddy|whatever)\b", # "sure, buddy" / "sure, whatever"
    r"\".+\"",                        # scare quotes
    r"/s\b",                          # explicit sarcasm tag
    r"\bokayyy+\b",                   # drawn-out "okay"
    r"\blol\b(?!\w)",
]
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
# Tone map by quadrant
|
| 155 |
+
# arousal high/low × valence pos/neg
|
| 156 |
+
def quad_tone(valence: float, arousal: float) -> str:
|
| 157 |
+
if arousal >= 0.6 and valence >= 0.1:
|
| 158 |
+
return "excited"
|
| 159 |
+
if arousal >= 0.6 and valence < -0.1:
|
| 160 |
+
return "concerned"
|
| 161 |
+
if arousal < 0.6 and valence < -0.1:
|
| 162 |
+
return "gentle"
|
| 163 |
+
if arousal < 0.6 and valence >= 0.1:
|
| 164 |
+
return "calm"
|
| 165 |
+
return "neutral"
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
# ---------------------------
|
| 169 |
+
# Utilities
|
| 170 |
+
# ---------------------------
|
| 171 |
+
# Patterns are compiled once at import time and reused on every call.
_compiled = {k: re.compile(p, re.I) for k, p in EMO_LEX.items()}   # emotion -> compiled regex
_neg_pat = re.compile(NEGATORS, re.I)                              # negator lookup
_int_pats = [(re.compile(p, re.I), w) for p, w in INTENSIFIERS.items()]  # (regex, multiplier)
_sarcasm = [re.compile(p, re.I) for p in SARCASM_CUES]             # sarcasm cue regexes
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def _emoji_hits(text: str) -> Dict[str, int]:
    """Count occurrences of each emotion's signal emojis in *text*.

    The result is keyed by every emotion in EMO_LEX (zero-filled), so
    callers can index any emotion without a membership check.
    """
    return {
        emo: sum(text.count(symbol) for symbol in EMOJI_SIGNAL.get(emo, ()))
        for emo in EMO_LEX
    }
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def _intensity_multiplier(text: str) -> float:
    """Compute a global intensity factor for *text*.

    Combines intensifier/hedge words (from _int_pats), exclamation
    marks, and ALL-CAPS shouting; the result is clamped to [0.5, 1.8].
    """
    factor = 1.0
    for pattern, weight in _int_pats:
        if pattern.search(text):
            factor *= weight
    # Each "!" (capped at five) nudges intensity upward slightly.
    factor *= 1.0 + 0.04 * min(text.count("!"), 5)
    # A run of 3+ uppercase letters reads as shouting.
    if re.search(r"\b[A-Z]{3,}\b", text):
        factor *= 1.08
    return min(1.8, max(0.5, factor))
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def _negation_factor(text: str, span_start: int) -> float:
    """Return a multiplier for an emotion hit starting at *span_start*.

    Scans up to ~40 characters (about 5 words) backwards for a negator
    such as "not"/"never"/"can't".  The scan is clipped at the most
    recent clause boundary (comma, period, semicolon, colon) so a
    negation in a previous clause — e.g. "not responded, I'm nervous" —
    does not flip an emotion in the current one.  A negator inverts and
    dampens the hit (-0.7); otherwise the hit is unchanged (1.0).
    """
    window = text[max(0, span_start - 40):span_start]

    # Keep only the text after the most recent clause boundary, if any.
    cut = max(window.rfind(ch) for ch in ",.;:")
    if cut >= 0:
        window = window[cut + 1:]

    return -0.7 if _neg_pat.search(window) else 1.0
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def _sarcasm_penalty(text: str) -> float:
    """Return a dampening factor (0.85) when a sarcasm cue matches, else 1.0.

    NOTE: retained for API compatibility; in this version _analyze
    detects sarcasm itself and transfers positive scores instead of
    calling this helper.
    """
    for pattern in _sarcasm:
        if pattern.search(text):
            return 0.85
    return 1.0
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def _softmax(d: Dict[str, float]) -> Dict[str, float]:
|
| 227 |
+
xs = list(d.values())
|
| 228 |
+
if not xs:
|
| 229 |
+
return d
|
| 230 |
+
m = max(xs)
|
| 231 |
+
exps = [math.exp(x - m) for x in xs]
|
| 232 |
+
s = sum(exps) or 1.0
|
| 233 |
+
return {k: exps[i] / s for i, k in enumerate(d.keys())}
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
# ---------------------------
|
| 237 |
+
# Per-user calibration (in-memory)
|
| 238 |
+
# ---------------------------
|
| 239 |
+
# In-memory, per-user calibration store; lost on process restart.
# Maps user_id -> {<emotion>: bias, "arousal_bias": float, "valence_bias": float}.
CALIBRATION: Dict[str, Dict[str, float]] = (
    {}
)  # user_id -> {bias_emo: float, arousal_bias: float, valence_bias: float}
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def _apply_calibration(
|
| 245 |
+
user_id: Optional[str], emo_scores: Dict[str, float], valence: float, arousal: float
|
| 246 |
+
):
|
| 247 |
+
if not user_id or user_id not in CALIBRATION:
|
| 248 |
+
return emo_scores, valence, arousal
|
| 249 |
+
calib = CALIBRATION[user_id]
|
| 250 |
+
# shift emotions
|
| 251 |
+
for k, bias in calib.items():
|
| 252 |
+
if k in emo_scores:
|
| 253 |
+
emo_scores[k] += bias * 0.2
|
| 254 |
+
# dedicated valence/arousal bias keys if present
|
| 255 |
+
valence += calib.get("valence_bias", 0.0) * 0.15
|
| 256 |
+
arousal += calib.get("arousal_bias", 0.0) * 0.15
|
| 257 |
+
return emo_scores, valence, arousal
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
# ---------------------------
|
| 261 |
+
# Core analysis
|
| 262 |
+
# ---------------------------
|
| 263 |
+
def _analyze(text: str, user_id: Optional[str] = None) -> dict:
    """Heuristic emotion analysis for a single message.

    Pipeline (order matters): lexicon hits with per-hit negation ->
    emoji signals -> sarcasm transfer (positive scores become
    angry/sad) -> global intensity scaling -> valence/arousal mapping
    -> confidence estimate -> softmax pseudo-probabilities -> per-user
    calibration -> tone selection.

    Args:
        text: raw user message; None is treated as "".
        user_id: optional key into the CALIBRATION store.
    Returns:
        dict with labels (top <=3), scores (per-emotion pseudo-probs),
        valence [-1..1], arousal [0..1], tone, confidence [0..1],
        reasons (human-readable explanations), spans (token positions
        for the top labels), ts, user_id.
    """
    t = text or ""
    tl = t.lower()

    # Base scores from lexicon hits; each match contributes +1.0,
    # or -0.7 when a nearby negator inverts it.
    emo_scores: Dict[str, float] = {k: 0.0 for k in EMO_LEX}
    spans: Dict[str, List[Tuple[int, int, str]]] = {k: [] for k in EMO_LEX}

    for emo, pat in _compiled.items():
        for m in pat.finditer(tl):
            factor = _negation_factor(tl, m.start())
            emo_scores[emo] += 1.0 * factor
            spans[emo].append((m.start(), m.end(), tl[m.start() : m.end()]))

    # Emoji contributions: 0.6 per emoji, weaker than a word hit.
    e_hits = _emoji_hits(t)
    for emo, c in e_hits.items():
        if c:
            emo_scores[emo] += 0.6 * c

    # Check for sarcasm - if detected, invert positive emotions
    sarcasm_detected = any(p.search(tl) for p in _sarcasm)

    if sarcasm_detected:
        # Sarcasm inverts positive emotions to negative
        # "yeah right, like they care" → anger/sadness, not love
        happy_score = emo_scores["happy"]
        love_score = emo_scores["love"]

        if happy_score > 0 or love_score > 0:
            # Transfer positive emotion scores to anger/sad
            emo_scores["angry"] += happy_score * 0.8
            emo_scores["sad"] += love_score * 0.8
            emo_scores["happy"] = 0.0
            emo_scores["love"] = 0.0

    # Intensifiers / punctuation adjustments (global, applied to every emotion)
    intensity = _intensity_multiplier(t)

    for emo in emo_scores:
        emo_scores[emo] *= intensity

    # Map to valence/arousal; anger and anxiety are weighted below the
    # full-strength negatives (sad, fear).
    pos = emo_scores["happy"] + emo_scores["love"]
    neg = (
        emo_scores["sad"]
        + emo_scores["fear"]
        + 0.9 * emo_scores["angry"]
        + 0.6 * emo_scores["anxious"]
    )
    valence = max(-1.0, min(1.0, round((pos - neg) * 0.4, 3)))

    # Arousal starts at 0.5; high-activation emotions raise it, fatigue
    # lowers it, exclamation marks (capped at 5) raise it slightly.
    base_arousal = 0.5
    arousal = (
        base_arousal
        + 0.12 * (emo_scores["angry"] > 0)
        + 0.08 * (emo_scores["anxious"] > 0)
        - 0.10 * (emo_scores["tired"] > 0)
        + 0.02 * min(t.count("!"), 5)
    )

    arousal = max(0.0, min(1.0, round(arousal, 3)))

    # Confidence: count signals + consistency (separation of top-2 scores)
    hits = sum(1 for v in emo_scores.values() if abs(v) > 0.01) + sum(e_hits.values())
    consistency = 0.0
    if hits:
        top2 = sorted(emo_scores.items(), key=lambda kv: kv[1], reverse=True)[:2]
        if len(top2) == 2 and top2[1][1] > 0:
            ratio = top2[0][1] / (top2[1][1] + 1e-6)
            consistency = max(
                0.0, min(1.0, (ratio - 1) / 3)
            )  # >1 means some separation
        elif len(top2) == 1:
            consistency = 0.6
    conf = max(0.0, min(1.0, 0.25 + 0.1 * hits + 0.5 * consistency))
    # downweight very short texts (too little signal to trust)
    if len(t.strip()) < 6:
        conf *= 0.6

    # Normalize emotions to pseudo-probs (softmax over positive scores;
    # negated/negative scores are floored at 0 first)
    pos_scores = {k: max(0.0, v) for k, v in emo_scores.items()}
    probs = _softmax(pos_scores)

    # Apply per-user calibration (may shift probs/valence/arousal;
    # probs are no longer guaranteed to sum to exactly 1 afterwards)
    probs, valence, arousal = _apply_calibration(user_id, valence=valence, arousal=arousal, emo_scores=probs) if False else _apply_calibration(user_id, probs, valence, arousal)

    # Tone
    tone = quad_tone(valence, arousal)

    # Explanations (human-readable notes on which heuristics fired)
    reasons = []
    if intensity > 1.0:
        reasons.append(f"intensifiers x{intensity:.2f}")
    if sarcasm_detected:
        reasons.append("sarcasm inverted positive emotions")
    if any(
        _neg_pat.search(tl[max(0, s - 40) : s])
        for emo, spans_ in spans.items()
        for (s, _, _) in spans_
    ):
        reasons.append("negation near emotion tokens")
    if any(e_hits.values()):
        reasons.append("emoji signals")

    labels_sorted = sorted(probs.items(), key=lambda kv: kv[1], reverse=True)
    top_labels = [k for k, v in labels_sorted[:3] if v > 0.05] or ["neutral"]

    return {
        "labels": top_labels,
        "scores": {k: round(v, 3) for k, v in probs.items()},
        "valence": round(valence, 3),
        "arousal": round(arousal, 3),
        "tone": tone,
        "confidence": round(conf, 3),
        "reasons": reasons,
        "spans": {k: spans[k] for k in top_labels if spans.get(k)},
        "ts": time.time(),
        "user_id": user_id,
    }
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
# ---------------------------
|
| 386 |
+
# MCP tools
|
| 387 |
+
# ---------------------------
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
@app.tool()
def analyze(text: str, user_id: Optional[str] = None) -> dict:
    """Run emotion analysis on a single message.

    Args:
        text: user message.
        user_id: optional user key for per-user calibration.

    Returns:
        dict with labels, scores (per emotion), valence [-1..1],
        arousal [0..1], tone (calm/neutral/excited/concerned/gentle),
        confidence, reasons, spans.
    """
    result = _analyze(text, user_id=user_id)
    return result
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
@app.tool()
def batch_analyze(messages: List[str], user_id: Optional[str] = None) -> List[dict]:
    """Analyze several messages in one call.

    Returns one result dict per message, in input order; None entries
    are treated as empty strings.
    """
    results = []
    for message in messages:
        results.append(_analyze(message or "", user_id=user_id))
    return results
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
@app.tool()
def calibrate(
    user_id: str,
    bias: Dict[str, float] = None,
    arousal_bias: float = 0.0,
    valence_bias: float = 0.0,
) -> dict:
    """Store per-user scoring nudges used by later analyze() calls.

    Args:
        user_id: user whose calibration entry to create/update.
        bias: per-emotion offsets, e.g. {"anxious": -0.1, "love": 0.1}.
        arousal_bias / valence_bias: small nudges (-1..1) applied after
            scoring.  A value of 0.0 leaves any stored nudge unchanged.
    Returns:
        {"ok": True, "calibration": <current entry for user_id>}.
    """
    entry = CALIBRATION.setdefault(user_id, {})
    for key, value in (bias or {}).items():
        entry[key] = float(value)
    if arousal_bias:
        entry["arousal_bias"] = float(arousal_bias)
    if valence_bias:
        entry["valence_bias"] = float(valence_bias)
    return {"ok": True, "calibration": entry}
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
@app.tool()
def reset_calibration(user_id: str) -> dict:
    """Delete any stored calibration for *user_id* (idempotent)."""
    if user_id in CALIBRATION:
        del CALIBRATION[user_id]
    return {"ok": True}
|
| 441 |
+
|
| 442 |
+
|
| 443 |
+
@app.tool()
def health() -> dict:
    """Lightweight liveness probe for MCP status indicators."""
    return {
        "status": "ok",
        "version": "1.2.0",
        "time": time.time(),
    }
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
@app.tool()
def version() -> dict:
    """Report server identity, version, feature flags, and emotion set."""
    feature_flags = [
        "negation",
        "intensifiers",
        "emoji",
        "sarcasm",
        "confidence",
        "batch",
        "calibration",
    ]
    return {
        "name": "emotion-server",
        "version": "1.2.0",
        "features": feature_flags,
        "emotions": list(EMO_LEX.keys()),
    }
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
if __name__ == "__main__":
|
| 469 |
+
app.run() # serves MCP over stdio
|
servers/emotion_server.py.bak
ADDED
|
@@ -0,0 +1,377 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# # servers/emotion_server.py
|
| 2 |
+
# from fastmcp import FastMCP, tool
|
| 3 |
+
# import re
|
| 4 |
+
|
| 5 |
+
# app = FastMCP("emotion-server")
|
| 6 |
+
|
| 7 |
+
# _PATTERNS = {
|
| 8 |
+
# "happy": r"\b(happy|grateful|excited|joy|delighted|content|optimistic)\b",
|
| 9 |
+
# "sad": r"\b(sad|down|depressed|cry|lonely|upset|miserable)\b",
|
| 10 |
+
# "angry": r"\b(angry|mad|furious|irritated|pissed|annoyed|resentful)\b",
|
| 11 |
+
# "anxious": r"\b(worried|anxious|nervous|stressed|overwhelmed|scared)\b",
|
| 12 |
+
# "tired": r"\b(tired|exhausted|drained|burnt|sleepy|fatigued)\b",
|
| 13 |
+
# "love": r"\b(love|affection|caring|fond|admire|cherish)\b",
|
| 14 |
+
# "fear": r"\b(afraid|fear|terrified|panicked|shaken)\b",
|
| 15 |
+
# }
|
| 16 |
+
|
| 17 |
+
# _TONES = {"happy":"light","love":"light","sad":"gentle","fear":"gentle",
|
| 18 |
+
# "angry":"calming","anxious":"calming","tired":"gentle"}
|
| 19 |
+
|
| 20 |
+
# def _analyze(text: str) -> dict:
|
| 21 |
+
# t = text.lower()
|
| 22 |
+
# found = [k for k,pat in _PATTERNS.items() if re.search(pat, t)]
|
| 23 |
+
# valence = 0.0
|
| 24 |
+
# if "happy" in found or "love" in found: valence += 0.6
|
| 25 |
+
# if "sad" in found or "fear" in found: valence -= 0.6
|
| 26 |
+
# if "angry" in found: valence -= 0.4
|
| 27 |
+
# if "anxious" in found: valence -= 0.3
|
| 28 |
+
# if "tired" in found: valence -= 0.2
|
| 29 |
+
# arousal = 0.5 + (0.3 if ("angry" in found or "anxious" in found) else 0) - (0.2 if "tired" in found else 0)
|
| 30 |
+
# tone = "neutral"
|
| 31 |
+
# for e in found:
|
| 32 |
+
# if e in _TONES: tone = _TONES[e]; break
|
| 33 |
+
# return {
|
| 34 |
+
# "labels": found or ["neutral"],
|
| 35 |
+
# "valence": max(-1, min(1, round(valence, 2))),
|
| 36 |
+
# "arousal": max(0, min(1, round(arousal, 2))),
|
| 37 |
+
# "tone": tone,
|
| 38 |
+
# }
|
| 39 |
+
|
| 40 |
+
# @tool
|
| 41 |
+
# def analyze(text: str) -> dict:
|
| 42 |
+
# """
|
| 43 |
+
# Analyze user text for emotion.
|
| 44 |
+
# Args:
|
| 45 |
+
# text: str - user message
|
| 46 |
+
# Returns: dict {labels, valence, arousal, tone}
|
| 47 |
+
# """
|
| 48 |
+
# return _analyze(text)
|
| 49 |
+
|
| 50 |
+
# if __name__ == "__main__":
|
| 51 |
+
# app.run() # serves MCP over stdio
|
| 52 |
+
# servers/emotion_server.py
|
| 53 |
+
from __future__ import annotations
|
| 54 |
+
# ---- FastMCP import shim (works across versions) ----
|
| 55 |
+
# Ensures: FastMCP is imported and `@tool` is ALWAYS a callable decorator.
|
| 56 |
+
from typing import Callable, Any
|
| 57 |
+
|
| 58 |
+
try:
|
| 59 |
+
from fastmcp import FastMCP # present across versions
|
| 60 |
+
except Exception as e:
|
| 61 |
+
raise ImportError(f"FastMCP missing: {e}")
|
| 62 |
+
|
| 63 |
+
_tool_candidate: Any = None
|
| 64 |
+
# Try common locations
|
| 65 |
+
try:
|
| 66 |
+
from fastmcp import tool as _tool_candidate # newer API: function
|
| 67 |
+
except Exception:
|
| 68 |
+
try:
|
| 69 |
+
from fastmcp.tools import tool as _tool_candidate # older API: function
|
| 70 |
+
except Exception:
|
| 71 |
+
_tool_candidate = None
|
| 72 |
+
|
| 73 |
+
# If we somehow got a module instead of a function, try attribute
|
| 74 |
+
if _tool_candidate is not None and not callable(_tool_candidate):
|
| 75 |
+
try:
|
| 76 |
+
_tool_candidate = _tool_candidate.tool # some builds expose module.tools.tool
|
| 77 |
+
except Exception:
|
| 78 |
+
_tool_candidate = None
|
| 79 |
+
|
| 80 |
+
def tool(*dargs, **dkwargs):
|
| 81 |
+
"""
|
| 82 |
+
Wrapper that behaves correctly in both usages:
|
| 83 |
+
@tool
|
| 84 |
+
@tool(...)
|
| 85 |
+
If real decorator exists, delegate. Otherwise:
|
| 86 |
+
- If called as @tool (i.e., first arg is fn), return fn (no-op).
|
| 87 |
+
- If called as @tool(...), return a decorator that returns fn (no-op).
|
| 88 |
+
"""
|
| 89 |
+
if callable(_tool_candidate):
|
| 90 |
+
return _tool_candidate(*dargs, **dkwargs)
|
| 91 |
+
|
| 92 |
+
# No real decorator available — provide no-op behavior.
|
| 93 |
+
if dargs and callable(dargs[0]) and not dkwargs:
|
| 94 |
+
# Used as @tool
|
| 95 |
+
fn = dargs[0]
|
| 96 |
+
return fn
|
| 97 |
+
|
| 98 |
+
# Used as @tool(...)
|
| 99 |
+
def _noop_decorator(fn):
|
| 100 |
+
return fn
|
| 101 |
+
return _noop_decorator
|
| 102 |
+
# ---- end shim ----
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
import re, math, time
|
| 106 |
+
from typing import Dict, List, Tuple, Optional
|
| 107 |
+
|
| 108 |
+
app = FastMCP("emotion-server")
|
| 109 |
+
|
| 110 |
+
# ---------------------------
|
| 111 |
+
# Lexicons & heuristics
|
| 112 |
+
# ---------------------------
|
| 113 |
+
EMO_LEX = {
|
| 114 |
+
"happy": r"\b(happy|grateful|excited|joy(?:ful)?|delighted|content|optimistic|glad|thrilled|yay)\b",
|
| 115 |
+
"sad": r"\b(sad|down|depress(?:ed|ing)|cry(?:ing)?|lonely|upset|miserable|heartbroken)\b",
|
| 116 |
+
"angry": r"\b(angry|mad|furious|irritated|pissed|annoyed|resentful|rage|hate)\b",
|
| 117 |
+
"anxious": r"\b(worried|anxious|nervous|stressed|overwhelmed|scared|uneasy|tense|on edge)\b",
|
| 118 |
+
"tired": r"\b(tired|exhaust(?:ed|ing)|drained|burnt(?:\s*out)?|sleepy|fatigued|worn out)\b",
|
| 119 |
+
"love": r"\b(love|affection|caring|fond|admire|cherish|adore)\b",
|
| 120 |
+
"fear": r"\b(afraid|fear|terrified|panic(?:ky|ked)?|panicked|shaken|petrified)\b",
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
# Emojis contribute signals even without words
|
| 124 |
+
EMOJI_SIGNAL = {
|
| 125 |
+
"happy": ["😀","😄","😊","🙂","😁","🥳","✨"],
|
| 126 |
+
"sad": ["😢","😭","😞","😔","☹️"],
|
| 127 |
+
"angry": ["😠","😡","🤬","💢"],
|
| 128 |
+
"anxious":["😰","😱","😬","😟","😧"],
|
| 129 |
+
"tired": ["🥱","😪","😴"],
|
| 130 |
+
"love": ["❤️","💖","💕","😍","🤍","💗","💓","😘"],
|
| 131 |
+
"fear": ["🫣","😨","😱","👀"],
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
NEGATORS = r"\b(no|not|never|hardly|barely|scarcely|isn['’]t|aren['’]t|can['’]t|don['’]t|doesn['’]t|won['’]t|without)\b"
|
| 135 |
+
INTENSIFIERS = {
|
| 136 |
+
r"\b(very|really|super|so|extremely|incredibly|totally|absolutely)\b": 1.35,
|
| 137 |
+
r"\b(kinda|kind of|somewhat|slightly|a bit|a little)\b": 0.75,
|
| 138 |
+
}
|
| 139 |
+
SARCASM_CUES = [
|
| 140 |
+
r"\byeah right\b", r"\bsure\b", r"\".+\"", r"/s\b", r"\bokayyy+\b", r"\blol\b(?!\w)"
|
| 141 |
+
]
|
| 142 |
+
|
| 143 |
+
# Tone map by quadrant
|
| 144 |
+
# arousal high/low × valence pos/neg
|
| 145 |
+
def quad_tone(valence: float, arousal: float) -> str:
|
| 146 |
+
if arousal >= 0.6 and valence >= 0.1: return "excited"
|
| 147 |
+
if arousal >= 0.6 and valence < -0.1: return "concerned"
|
| 148 |
+
if arousal < 0.6 and valence < -0.1: return "gentle"
|
| 149 |
+
if arousal < 0.6 and valence >= 0.1: return "calm"
|
| 150 |
+
return "neutral"
|
| 151 |
+
|
| 152 |
+
# ---------------------------
|
| 153 |
+
# Utilities
|
| 154 |
+
# ---------------------------
|
| 155 |
+
_compiled = {k: re.compile(p, re.I) for k, p in EMO_LEX.items()}
|
| 156 |
+
_neg_pat = re.compile(NEGATORS, re.I)
|
| 157 |
+
_int_pats = [(re.compile(p, re.I), w) for p, w in INTENSIFIERS.items()]
|
| 158 |
+
_sarcasm = [re.compile(p, re.I) for p in SARCASM_CUES]
|
| 159 |
+
|
| 160 |
+
def _emoji_hits(text: str) -> Dict[str, int]:
|
| 161 |
+
hits = {k: 0 for k in EMO_LEX}
|
| 162 |
+
for emo, arr in EMOJI_SIGNAL.items():
|
| 163 |
+
for e in arr:
|
| 164 |
+
hits[emo] += text.count(e)
|
| 165 |
+
return hits
|
| 166 |
+
|
| 167 |
+
def _intensity_multiplier(text: str) -> float:
|
| 168 |
+
mult = 1.0
|
| 169 |
+
for pat, w in _int_pats:
|
| 170 |
+
if pat.search(text):
|
| 171 |
+
mult *= w
|
| 172 |
+
# Exclamation marks increase arousal a bit (cap effect)
|
| 173 |
+
bangs = min(text.count("!"), 5)
|
| 174 |
+
mult *= (1.0 + 0.04 * bangs)
|
| 175 |
+
# ALL CAPS word run nudges intensity
|
| 176 |
+
if re.search(r"\b[A-Z]{3,}\b", text):
|
| 177 |
+
mult *= 1.08
|
| 178 |
+
return max(0.5, min(1.8, mult))
|
| 179 |
+
|
| 180 |
+
def _negation_factor(text: str, span_start: int) -> float:
    """
    Look 5 words (~40 chars) backwards for a negator.
    If present, invert or dampen signal.
    Stop at clause boundaries (comma, period, semicolon, colon) so a
    negation in a previous clause cannot flip an emotion in this one
    (keeps this .bak consistent with the live emotion_server.py).
    """
    window_start = max(0, span_start - 40)
    window = text[window_start:span_start]
    # Only consider text after the most recent clause boundary.
    last_boundary = max(
        window.rfind(","), window.rfind("."), window.rfind(";"), window.rfind(":")
    )
    if last_boundary != -1:
        window = window[last_boundary + 1 :]
    if _neg_pat.search(window):
        return -0.7  # invert and dampen
    return 1.0
|
| 190 |
+
|
| 191 |
+
def _sarcasm_penalty(text: str) -> float:
|
| 192 |
+
return 0.85 if any(p.search(text) for p in _sarcasm) else 1.0
|
| 193 |
+
|
| 194 |
+
def _softmax(d: Dict[str, float]) -> Dict[str, float]:
|
| 195 |
+
xs = list(d.values())
|
| 196 |
+
if not xs: return d
|
| 197 |
+
m = max(xs)
|
| 198 |
+
exps = [math.exp(x - m) for x in xs]
|
| 199 |
+
s = sum(exps) or 1.0
|
| 200 |
+
return {k: exps[i] / s for i, k in enumerate(d.keys())}
|
| 201 |
+
|
| 202 |
+
# ---------------------------
|
| 203 |
+
# Per-user calibration (in-memory)
|
| 204 |
+
# ---------------------------
|
| 205 |
+
CALIBRATION: Dict[str, Dict[str, float]] = {} # user_id -> {bias_emo: float, arousal_bias: float, valence_bias: float}
|
| 206 |
+
|
| 207 |
+
def _apply_calibration(user_id: Optional[str], emo_scores: Dict[str, float], valence: float, arousal: float):
|
| 208 |
+
if not user_id or user_id not in CALIBRATION:
|
| 209 |
+
return emo_scores, valence, arousal
|
| 210 |
+
calib = CALIBRATION[user_id]
|
| 211 |
+
# shift emotions
|
| 212 |
+
for k, bias in calib.items():
|
| 213 |
+
if k in emo_scores:
|
| 214 |
+
emo_scores[k] += bias * 0.2
|
| 215 |
+
# dedicated valence/arousal bias keys if present
|
| 216 |
+
valence += calib.get("valence_bias", 0.0) * 0.15
|
| 217 |
+
arousal += calib.get("arousal_bias", 0.0) * 0.15
|
| 218 |
+
return emo_scores, valence, arousal
|
| 219 |
+
|
| 220 |
+
# ---------------------------
|
| 221 |
+
# Core analysis
|
| 222 |
+
# ---------------------------
|
| 223 |
+
def _analyze(text: str, user_id: Optional[str] = None) -> dict:
|
| 224 |
+
t = text or ""
|
| 225 |
+
tl = t.lower()
|
| 226 |
+
|
| 227 |
+
# Base scores from lexicon hits
|
| 228 |
+
emo_scores: Dict[str, float] = {k: 0.0 for k in EMO_LEX}
|
| 229 |
+
spans: Dict[str, List[Tuple[int, int, str]]] = {k: [] for k in EMO_LEX}
|
| 230 |
+
|
| 231 |
+
for emo, pat in _compiled.items():
|
| 232 |
+
for m in pat.finditer(tl):
|
| 233 |
+
factor = _negation_factor(tl, m.start())
|
| 234 |
+
emo_scores[emo] += 1.0 * factor
|
| 235 |
+
spans[emo].append((m.start(), m.end(), tl[m.start():m.end()]))
|
| 236 |
+
|
| 237 |
+
# Emoji contributions
|
| 238 |
+
e_hits = _emoji_hits(t)
|
| 239 |
+
for emo, c in e_hits.items():
|
| 240 |
+
if c:
|
| 241 |
+
emo_scores[emo] += 0.6 * c
|
| 242 |
+
|
| 243 |
+
# Intensifiers / sarcasm / punctuation adjustments (global)
|
| 244 |
+
intensity = _intensity_multiplier(t)
|
| 245 |
+
sarcasm_mult = _sarcasm_penalty(t)
|
| 246 |
+
|
| 247 |
+
for emo in emo_scores:
|
| 248 |
+
emo_scores[emo] *= intensity * sarcasm_mult
|
| 249 |
+
|
| 250 |
+
# Map to valence/arousal
|
| 251 |
+
pos = emo_scores["happy"] + emo_scores["love"]
|
| 252 |
+
neg = emo_scores["sad"] + emo_scores["fear"] + 0.9 * emo_scores["angry"] + 0.6 * emo_scores["anxious"]
|
| 253 |
+
valence = max(-1.0, min(1.0, round((pos - neg) * 0.4, 3)))
|
| 254 |
+
|
| 255 |
+
base_arousal = 0.5
|
| 256 |
+
arousal = base_arousal \
|
| 257 |
+
+ 0.12 * (emo_scores["angry"] > 0) \
|
| 258 |
+
+ 0.08 * (emo_scores["anxious"] > 0) \
|
| 259 |
+
- 0.10 * (emo_scores["tired"] > 0) \
|
| 260 |
+
+ 0.02 * min(t.count("!"), 5)
|
| 261 |
+
|
| 262 |
+
arousal = max(0.0, min(1.0, round(arousal, 3)))
|
| 263 |
+
|
| 264 |
+
# Confidence: count signals + consistency
|
| 265 |
+
hits = sum(1 for v in emo_scores.values() if abs(v) > 0.01) + sum(e_hits.values())
|
| 266 |
+
consistency = 0.0
|
| 267 |
+
if hits:
|
| 268 |
+
top2 = sorted(emo_scores.items(), key=lambda kv: kv[1], reverse=True)[:2]
|
| 269 |
+
if len(top2) == 2 and top2[1][1] > 0:
|
| 270 |
+
ratio = top2[0][1] / (top2[1][1] + 1e-6)
|
| 271 |
+
consistency = max(0.0, min(1.0, (ratio - 1) / 3)) # >1 means some separation
|
| 272 |
+
elif len(top2) == 1:
|
| 273 |
+
consistency = 0.6
|
| 274 |
+
conf = max(0.0, min(1.0, 0.25 + 0.1 * hits + 0.5 * consistency))
|
| 275 |
+
# downweight very short texts
|
| 276 |
+
if len(t.strip()) < 6:
|
| 277 |
+
conf *= 0.6
|
| 278 |
+
|
| 279 |
+
# Normalize emotions to pseudo-probs (softmax over positive scores)
|
| 280 |
+
pos_scores = {k: max(0.0, v) for k, v in emo_scores.items()}
|
| 281 |
+
probs = _softmax(pos_scores)
|
| 282 |
+
|
| 283 |
+
# Apply per-user calibration
|
| 284 |
+
probs, valence, arousal = _apply_calibration(user_id, probs, valence, arousal)
|
| 285 |
+
|
| 286 |
+
# Tone
|
| 287 |
+
tone = quad_tone(valence, arousal)
|
| 288 |
+
|
| 289 |
+
# Explanations
|
| 290 |
+
reasons = []
|
| 291 |
+
if intensity > 1.0: reasons.append(f"intensifiers x{intensity:.2f}")
|
| 292 |
+
if sarcasm_mult < 1.0: reasons.append("sarcasm cues detected")
|
| 293 |
+
if any(_neg_pat.search(tl[max(0,s-40):s]) for emo, spans_ in spans.items() for (s,_,_) in spans_):
|
| 294 |
+
reasons.append("negation near emotion tokens")
|
| 295 |
+
if any(e_hits.values()): reasons.append("emoji signals")
|
| 296 |
+
|
| 297 |
+
labels_sorted = sorted(probs.items(), key=lambda kv: kv[1], reverse=True)
|
| 298 |
+
top_labels = [k for k, v in labels_sorted[:3] if v > 0.05] or ["neutral"]
|
| 299 |
+
|
| 300 |
+
return {
|
| 301 |
+
"labels": top_labels,
|
| 302 |
+
"scores": {k: round(v, 3) for k, v in probs.items()},
|
| 303 |
+
"valence": round(valence, 3),
|
| 304 |
+
"arousal": round(arousal, 3),
|
| 305 |
+
"tone": tone,
|
| 306 |
+
"confidence": round(conf, 3),
|
| 307 |
+
"reasons": reasons,
|
| 308 |
+
"spans": {k: spans[k] for k in top_labels if spans.get(k)},
|
| 309 |
+
"ts": time.time(),
|
| 310 |
+
"user_id": user_id,
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
# ---------------------------
|
| 314 |
+
# MCP tools
|
| 315 |
+
# ---------------------------
|
| 316 |
+
|
| 317 |
+
@app.tool()
def analyze(text: str, user_id: Optional[str] = None) -> dict:
    """
    Analyze a single message for emotion.

    Thin MCP-facing wrapper around the internal `_analyze` pipeline.
    Args:
        text: user message
        user_id: optional user key for per-user calibration
    Returns:
        dict with labels, scores (per emotion), valence [-1..1], arousal [0..1],
        tone (calm/neutral/excited/concerned/gentle), confidence, reasons,
        spans, ts, and user_id (as produced by `_analyze`).
    """
    return _analyze(text, user_id=user_id)
|
| 329 |
+
|
| 330 |
+
@app.tool()
def batch_analyze(messages: List[str], user_id: Optional[str] = None) -> List[dict]:
    """Analyze every message in *messages*; falsy entries analyze as ""."""
    results: List[dict] = []
    for message in messages:
        results.append(_analyze(message or "", user_id=user_id))
    return results
|
| 336 |
+
|
| 337 |
+
@app.tool()
def calibrate(user_id: str, bias: Dict[str, float] = None, arousal_bias: float = 0.0, valence_bias: float = 0.0) -> dict:
    """
    Adjust per-user calibration.
    - bias: per-emotion nudges, e.g. {"anxious": -0.1, "love": 0.1}
    - arousal_bias/valence_bias: small nudges (-1..1) applied after scoring.
      NOTE(review): falsy values (0.0) are skipped, so an existing bias
      cannot be reset to zero through this tool — confirm intended.
    """
    profile = CALIBRATION.setdefault(user_id, {})
    for emotion, nudge in (bias or {}).items():
        profile[emotion] = float(nudge)
    if arousal_bias:
        profile["arousal_bias"] = float(arousal_bias)
    if valence_bias:
        profile["valence_bias"] = float(valence_bias)
    return {"ok": True, "calibration": profile}
|
| 354 |
+
|
| 355 |
+
@app.tool()
def reset_calibration(user_id: str) -> dict:
    """Drop any stored calibration for *user_id* (no-op when absent)."""
    if user_id in CALIBRATION:
        del CALIBRATION[user_id]
    return {"ok": True}
|
| 360 |
+
|
| 361 |
+
@app.tool()
def health() -> dict:
    """Lightweight liveness probe used by MCP status chips."""
    return {
        "status": "ok",
        "version": "1.2.0",
        "time": time.time(),
    }
|
| 365 |
+
|
| 366 |
+
@app.tool()
def version() -> dict:
    """Server identity, version, feature flags, and known emotion labels."""
    features = ["negation", "intensifiers", "emoji", "sarcasm",
                "confidence", "batch", "calibration"]
    return {
        "name": "emotion-server",
        "version": "1.2.0",
        "features": features,
        "emotions": list(EMO_LEX.keys()),
    }
|
| 375 |
+
|
| 376 |
+
# Entry point: run the emotion MCP server over the stdio transport.
if __name__ == "__main__":
    app.run()  # serves MCP over stdio
|
servers/memory_server.py
ADDED
|
@@ -0,0 +1,599 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# # servers/memory_server.py
|
| 2 |
+
# from fastmcp import FastMCP, tool
|
| 3 |
+
# import json, os, time
|
| 4 |
+
|
| 5 |
+
# app = FastMCP("memory-server")
|
| 6 |
+
# FILE = os.environ.get("GM_MEMORY_FILE", "memory.json")
|
| 7 |
+
|
| 8 |
+
# def _load():
|
| 9 |
+
# if os.path.exists(FILE):
|
| 10 |
+
# with open(FILE) as f: return json.load(f)
|
| 11 |
+
# return []
|
| 12 |
+
|
| 13 |
+
# def _save(history):
|
| 14 |
+
# with open(FILE, "w") as f: json.dump(history[-50:], f) # keep up to 50
|
| 15 |
+
|
| 16 |
+
# @tool
|
| 17 |
+
# def remember(text: str, meta: dict | None = None) -> dict:
|
| 18 |
+
# """
|
| 19 |
+
# Append an entry to memory.
|
| 20 |
+
# Args:
|
| 21 |
+
# text: str - content to store
|
| 22 |
+
# meta: dict - optional info like {"tone":"gentle","labels":["sad"]}
|
| 23 |
+
# Returns: {"ok": True, "size": <n>}
|
| 24 |
+
# """
|
| 25 |
+
# data = _load()
|
| 26 |
+
# data.append({"t": int(time.time()), "text": text, "meta": meta or {}})
|
| 27 |
+
# _save(data)
|
| 28 |
+
# return {"ok": True, "size": len(data)}
|
| 29 |
+
|
| 30 |
+
# @tool
|
| 31 |
+
# def recall(k: int = 3) -> dict:
|
| 32 |
+
# """
|
| 33 |
+
# Return last k entries from memory (most recent last).
|
| 34 |
+
# Args:
|
| 35 |
+
# k: int - how many items
|
| 36 |
+
# Returns: {"items":[...]}
|
| 37 |
+
# """
|
| 38 |
+
# data = _load()
|
| 39 |
+
# return {"items": data[-k:]}
|
| 40 |
+
|
| 41 |
+
# if __name__ == "__main__":
|
| 42 |
+
# app.run()
|
| 43 |
+
# servers/memory_server.py
|
| 44 |
+
# servers/memory_server.py
|
| 45 |
+
from __future__ import annotations
# ---- FastMCP import shim (works across versions) ----
# Ensures: FastMCP is imported and `@tool` is ALWAYS a callable decorator,
# even on fastmcp builds that expose `tool` in a different location (or
# not at all).
from typing import Callable, Any  # NOTE(review): Callable is imported but unused

try:
    from fastmcp import FastMCP  # present across versions
except Exception as e:
    # FastMCP itself is mandatory; re-raise with a clearer message.
    raise ImportError(f"FastMCP missing: {e}")

_tool_candidate: Any = None
# Try common locations for the `tool` decorator, newest API first.
try:
    from fastmcp import tool as _tool_candidate  # newer API: function
except Exception:
    try:
        from fastmcp.tools import tool as _tool_candidate  # older API: function
    except Exception:
        _tool_candidate = None

# If we somehow got a module instead of a function, try its `.tool` attribute
if _tool_candidate is not None and not callable(_tool_candidate):
    try:
        _tool_candidate = _tool_candidate.tool  # some builds expose module.tools.tool
    except Exception:
        _tool_candidate = None

def tool(*dargs, **dkwargs):
    """
    Wrapper that behaves correctly in both usages:
        @tool
        @tool(...)
    If a real decorator exists, delegate to it. Otherwise:
      - If called as @tool (i.e., first arg is fn), return fn (no-op).
      - If called as @tool(...), return a decorator that returns fn (no-op).
    """
    if callable(_tool_candidate):
        return _tool_candidate(*dargs, **dkwargs)

    # No real decorator available — provide no-op behavior.
    if dargs and callable(dargs[0]) and not dkwargs:
        # Used as bare @tool: first positional arg is the function itself.
        fn = dargs[0]
        return fn

    # Used as @tool(...): return an identity decorator.
    def _noop_decorator(fn):
        return fn
    return _noop_decorator
# ---- end shim ----
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
import json, os, time, math, re
|
| 98 |
+
from typing import Dict, List, Optional, Any, Tuple
|
| 99 |
+
from collections import Counter
|
| 100 |
+
|
| 101 |
+
app = FastMCP("memory-server")

# ---------------------------
# Storage & limits
# ---------------------------
# All values are overridable via environment variables; caps bound how
# many items each memory tier retains on save.
FILE = os.environ.get("GM_MEMORY_FILE", "memory.json")  # JSON store path
STM_MAX = int(os.environ.get("GM_STM_MAX", "120"))      # short-term memory cap
EP_MAX  = int(os.environ.get("GM_EPISODES_MAX", "240")) # episode tier cap
FACT_MAX= int(os.environ.get("GM_FACTS_MAX", "200"))    # fact tier cap
|
| 110 |
+
# ---------------------------
|
| 111 |
+
# Emotion Drift Analysis
|
| 112 |
+
# ---------------------------
|
| 113 |
+
def compute_emotional_direction(trajectory: List[Dict[str, Any]]) -> str:
    """Classify the recent emotional trend of *trajectory*.

    Each entry carries "valence" [-1..1] and "arousal" [0..1] keys.
    Returns one of "escalating", "de-escalating", "volatile", "stable".
    """
    if len(trajectory) < 2:
        return "stable"

    # Only the last five samples drive the classification.
    window = trajectory[-5:]
    vals = [pt.get("valence", 0.0) for pt in window]
    ars = [pt.get("arousal", 0.5) for pt in window]

    # Net change across the window (end minus start).
    dv = vals[-1] - vals[0]
    da = ars[-1] - ars[0]

    if da > 0.15 and dv < -0.1:
        # More activated AND more negative.
        return "escalating"
    if da < -0.15 and dv > 0.1:
        # Calming down AND more positive.
        return "de-escalating"
    if max(ars) - min(ars) > 0.3:
        # Wide swings in arousal within the window.
        return "volatile"
    return "stable"
|
| 139 |
+
|
| 140 |
+
def get_emotion_trajectory(store: Dict[str, Any], k: int = 10) -> Tuple[List[Dict[str, Any]], str]:
    """
    Return the last *k* emotion events from memory plus a trend label.

    Emotion data is read from either `item["event"]["emotion"]` (written
    by `remember_event`) or `item["meta"]` (written by `remember`).
    A weak/neutral reading inherits the previous strong emotion with a
    slight decay, so the arc does not reset on every small-talk message.

    Returns:
        (trajectory, direction) where trajectory entries carry
        primary_label / valence / arousal / ts / text (truncated), and
        direction is the result of `compute_emotional_direction`.
    """
    stm = store.get("stm", [])
    trajectory = []
    last_emotion_state = None  # Track last non-neutral emotion

    for item in stm[-k:]:
        # Check if emotion data is in event.emotion (from remember_event)
        event = item.get("event", {})
        emotion = event.get("emotion", {})

        # Also check if emotion data is in meta (from remember)
        if not emotion or not emotion.get("labels"):
            meta = item.get("meta", {})
            if meta and meta.get("labels"):
                emotion = meta

        if emotion and emotion.get("labels"):
            primary_label = (emotion.get("labels") or ["neutral"])[0]
            valence = float(emotion.get("valence", 0.0))
            arousal = float(emotion.get("arousal", 0.5))

            # If this is neutral/weak emotion, inherit previous state
            confidence = float(emotion.get("confidence", 0.25))
            if primary_label in ["happy", "neutral"] and confidence < 0.35 and last_emotion_state:
                # Carry forward previous emotion (decay slightly toward neutral)
                primary_label = last_emotion_state["primary_label"]
                valence = last_emotion_state["valence"] * 0.8  # Slight decay
                arousal = last_emotion_state["arousal"] * 0.9
            else:
                # Strong emotion detected, update state
                last_emotion_state = {
                    "primary_label": primary_label,
                    "valence": valence,
                    "arousal": arousal
                }

            trajectory.append({
                "primary_label": primary_label,
                "valence": valence,
                "arousal": arousal,
                "ts": item.get("t", int(time.time())),
                "text": item.get("text", "")[:50]  # First 50 chars
            })

    direction = compute_emotional_direction(trajectory)
    return trajectory, direction
|
| 190 |
+
# ---------------------------
|
| 191 |
+
# File helpers & migrations
|
| 192 |
+
# ---------------------------
|
| 193 |
+
def _default_store() -> Dict[str, Any]:
|
| 194 |
+
return {"stm": [], "episodes": [], "facts": [], "meta": {"created": int(time.time()), "version": "1.3.0"}}
|
| 195 |
+
|
| 196 |
+
def _load() -> Dict[str, Any]:
    """Load the tiered store from FILE, migrating/repairing as needed.

    - Migrates a legacy flat-list file into the tiered
      {"stm", "episodes", "facts", "meta"} layout.
    - Backfills any missing/invalid tier keys so callers can safely use
      store["stm"] etc. without a KeyError (the old version crashed on
      partially-formed files).
    - Backfills missing STM ids.
    Falls back to a fresh store when the file is absent or unreadable.
    """
    if not os.path.exists(FILE):
        return _default_store()
    try:
        with open(FILE) as f:
            data = json.load(f)
    except Exception:
        # Corrupt or unreadable JSON: start fresh rather than crash.
        return _default_store()
    # migrate flat list -> tiered
    if isinstance(data, list):
        data = {"stm": data[-STM_MAX:], "episodes": [], "facts": [],
                "meta": {"created": int(time.time()), "version": "1.3.0"}}
    if not isinstance(data, dict):
        return _default_store()
    changed = False
    # Backfill missing tiers/meta so tools can index them directly.
    for key in ("stm", "episodes", "facts"):
        if not isinstance(data.get(key), list):
            data[key] = []
            changed = True
    if "meta" not in data:
        data["meta"] = {"created": int(time.time()), "version": "1.3.0"}
        changed = True
    # backfill ids in stm
    for i, it in enumerate(data["stm"]):
        if "id" not in it:
            it["id"] = f"stm-{it.get('t', int(time.time()))}-{i}"
            changed = True
    if changed:
        _save(data)
    return data
|
| 216 |
+
|
| 217 |
+
def _save(store: Dict[str, Any]) -> None:
    """Persist *store* to FILE, trimming each tier to its cap first."""
    caps = (("stm", STM_MAX), ("episodes", EP_MAX), ("facts", FACT_MAX))
    for key, cap in caps:
        store[key] = store.get(key, [])[-cap:]
    with open(FILE, "w") as fh:
        json.dump(store, fh, ensure_ascii=False)
|
| 223 |
+
|
| 224 |
+
# ---------------------------
|
| 225 |
+
# Salience & decay (same as before)
|
| 226 |
+
# ---------------------------
|
| 227 |
+
def time_decay(ts: float, now: Optional[float] = None, half_life_hours: float = 72.0) -> float:
    """Exponential half-life weight for an event at timestamp *ts*.

    Returns 1.0 for "right now" (or future timestamps, which clamp to
    zero elapsed time) and halves every *half_life_hours*.
    """
    reference = now or time.time()  # falsy `now` falls back to wall clock
    elapsed_hours = (reference - ts) / 3600.0
    if elapsed_hours < 0.0:
        elapsed_hours = 0.0
    return 0.5 ** (elapsed_hours / half_life_hours)
|
| 231 |
+
|
| 232 |
+
# Token pattern for crude keyword extraction (letters and apostrophes).
_WORD = re.compile(r"[a-zA-Z']+")

def keyword_set(text: str) -> set:
    """Lowercased keywords (length > 2) found in *text*."""
    words = _WORD.findall(text or "")
    return {w.lower() for w in words if len(w) > 2}

def novelty_score(text: str, recent_texts: List[str], k: int = 10) -> float:
    """How different *text* is from the last *k* non-empty recent texts.

    Jaccard-similarity based: 1.0 means completely novel, 0.0 means an
    identical keyword set to some recent text (or no keywords at all).
    """
    if not text:
        return 0.0
    current = keyword_set(text)
    if not current:
        return 0.0
    history = [keyword_set(t) for t in recent_texts[-k:] if t]
    if not history:
        return 1.0
    best_sim = 0.0
    for past in history:
        overlap = len(current & past)
        total = len(current | past) or 1
        sim = overlap / total
        if sim > best_sim:
            best_sim = sim
    return max(0.0, 1.0 - best_sim)
|
| 253 |
+
|
| 254 |
+
def compute_salience(ev: Dict[str, Any], recent_texts: List[str]) -> float:
    """Score how memory-worthy an event is, in [0, 1].

    Blends emotional intensity, novelty vs. recent texts, explicit user
    pinning, task boundaries, and a sincerity signal (0-100 scale).
    (Fixed: removed an unused `labels` local from the original.)
    """
    emo = ev.get("emotion", {})
    conf    = float(emo.get("confidence") or 0.0)
    valence = float(emo.get("valence") or 0.0)
    arousal = float(emo.get("arousal") or 0.5)
    sinc    = float(ev.get("sincerity") or 0.0) / 100.0
    text    = ev.get("text", "")

    # Emotional "heat": valence magnitude, amplified by arousal,
    # weighted by detector confidence.
    affect = abs(valence) * (0.7 + 0.3 * arousal) * conf
    nov = novelty_score(text, recent_texts)
    user_flag = 1.0 if ev.get("user_pinned") else 0.0
    boundary = 1.0 if ev.get("task_boundary") else 0.0

    # NOTE(review): weights sum to 1.10; the clamp keeps the result in
    # range, but the blend slightly favors high-affect events — confirm
    # this weighting is intended.
    sal = 0.45 * affect + 0.25 * nov + 0.18 * user_flag + 0.12 * boundary + 0.10 * sinc
    return round(max(0.0, min(1.0, sal)), 3)
|
| 269 |
+
|
| 270 |
+
# ---------------------------
|
| 271 |
+
# Episode & fact synthesis
|
| 272 |
+
# ---------------------------
|
| 273 |
+
def make_episode(ev: Dict[str, Any], salience: float) -> Dict[str, Any]:
    """Condense a raw event dict into a compact episode record."""
    emo = ev.get("emotion", {})
    ts = ev.get("ts") or int(time.time())
    text = ev.get("text")
    summary = ev.get("summary") or (text[:140] if text else "")
    labels = emo.get("labels") or []
    return {
        "episode_id": ev.get("id") or f"ep-{int(time.time()*1000)}",
        "ts_start": ts,
        "ts_end": ts,
        "summary": summary,
        "topics": list(set(labels)) or ["misc"],
        "emotion_peak": (labels or ["neutral"])[0],
        "emotion_conf": float(emo.get("confidence") or 0.0),
        "tone": emo.get("tone") or "neutral",
        "salience": float(salience),
        "provenance_event": ev.get("id"),
    }
|
| 287 |
+
|
| 288 |
+
def cluster_topics(episodes: List[Dict[str, Any]]) -> Dict[str, List[Dict[str, Any]]]:
    """Group episodes by topic; an episode may land in several buckets."""
    grouped: Dict[str, List[Dict[str, Any]]] = {}
    for episode in episodes:
        for topic in episode.get("topics") or ["misc"]:
            grouped.setdefault(topic, []).append(episode)
    return grouped
|
| 294 |
+
|
| 295 |
+
def synthesize_fact(topic: str, eps: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """Distill a tone-preference fact from a topic's episodes.

    Requires at least 3 supporting episodes and a combined confidence
    (mean salience + mean emotion confidence, equally weighted) of at
    least 0.6; otherwise returns None.
    """
    if not eps:
        return None
    support = len(eps)
    mean_sal = sum(e.get("salience", 0.0) for e in eps) / max(1, support)
    mean_conf = sum(e.get("emotion_conf", 0.0) for e in eps) / max(1, support)
    confidence = max(0.0, min(1.0, 0.5 * mean_sal + 0.5 * mean_conf))
    if support < 3 or confidence < 0.6:
        return None
    # Majority tone; ties resolve to the first tone encountered.
    tone_counts: Dict[str, int] = {}
    for e in eps:
        tone = e.get("tone", "neutral")
        tone_counts[tone] = tone_counts.get(tone, 0) + 1
    top_tone = max(tone_counts.items(), key=lambda kv: kv[1])[0]
    return {
        "fact_id": f"fact-{topic}-{int(time.time())}",
        "proposition": f"Prefers {top_tone} tone for topic '{topic}'",
        "support": support,
        "confidence": round(confidence, 2),
        "last_updated": int(time.time()),
        "topics": [topic],
        "provenance_episode_ids": [e["episode_id"] for e in eps],
    }
|
| 316 |
+
|
| 317 |
+
# ---------------------------
|
| 318 |
+
# ID helpers
|
| 319 |
+
# ---------------------------
|
| 320 |
+
def _ensure_stm_id(item: Dict[str, Any], idx: int) -> Dict[str, Any]:
|
| 321 |
+
if "id" not in item:
|
| 322 |
+
item["id"] = f"stm-{item.get('t', int(time.time()))}-{idx}"
|
| 323 |
+
return item
|
| 324 |
+
|
| 325 |
+
def _stm_text(item: Dict[str, Any]) -> str:
|
| 326 |
+
if "text" in item and isinstance(item["text"], str):
|
| 327 |
+
return item["text"]
|
| 328 |
+
return (item.get("event") or {}).get("text", "") or ""
|
| 329 |
+
|
| 330 |
+
def _collect_docs(store: Dict[str, Any], tier: Optional[str] = None) -> List[Tuple[str,str,str,int]]:
|
| 331 |
+
"""
|
| 332 |
+
Returns list of (id, tier, text, ts)
|
| 333 |
+
"""
|
| 334 |
+
docs: List[Tuple[str,str,str,int]] = []
|
| 335 |
+
if tier in (None, "stm"):
|
| 336 |
+
for i, it in enumerate(store.get("stm", [])):
|
| 337 |
+
it = _ensure_stm_id(it, i)
|
| 338 |
+
docs.append((it["id"], "stm", _stm_text(it), int(it.get("t", time.time()))))
|
| 339 |
+
if tier in (None, "episodes"):
|
| 340 |
+
for ep in store.get("episodes", []):
|
| 341 |
+
docs.append((ep.get("episode_id",""), "episodes", ep.get("summary",""), int(ep.get("ts_end", time.time()))))
|
| 342 |
+
if tier in (None, "facts"):
|
| 343 |
+
for f in store.get("facts", []):
|
| 344 |
+
docs.append((f.get("fact_id",""), "facts", f.get("proposition",""), int(f.get("last_updated", time.time()))))
|
| 345 |
+
return [d for d in docs if d[0] and d[2]]
|
| 346 |
+
|
| 347 |
+
# ---------------------------
|
| 348 |
+
# Simple TF-IDF search
|
| 349 |
+
# ---------------------------
|
| 350 |
+
def _tfidf_rank(query: str, docs: List[Tuple[str, str, str, int]], k: int = 5):
    """
    Rank *docs* against *query* with a minimal TF-IDF scorer.

    docs: (id, tier, text, ts) tuples. Returns up to *k* tuples of
    (id, tier, text, ts, score, matched_terms), best first
    (score desc, then most-recent ts).
    (Fixed: removed an unused `qset` local from the original.)
    """
    query_terms = list(keyword_set(query))
    if not query_terms or not docs:
        return []

    # Document frequency per term.
    df: Counter = Counter()
    terms_by_id: Dict[str, List[str]] = {}
    for doc_id, _tier, text, _ts in docs:
        terms = list(keyword_set(text))
        terms_by_id[doc_id] = terms
        df.update(set(terms))

    n_docs = len(docs)
    # Smoothed IDF so terms present in every doc still score > 0.
    idf = {term: math.log((n_docs + 1) / (df[term] + 1)) + 1.0 for term in df}

    ranked = []
    for doc_id, tier_name, text, ts in docs:
        tf = Counter(terms_by_id[doc_id])
        score = 0.0
        matched: List[str] = []
        for term in query_terms:
            count = tf[term]
            if count > 0:
                score += count * idf.get(term, 1.0)
                matched.append(term)
        if score > 0:
            ranked.append((doc_id, tier_name, text, ts, score, matched))

    ranked.sort(key=lambda row: (-row[4], -row[3]))  # score desc, then recent
    return ranked[:k]
|
| 380 |
+
|
| 381 |
+
# ---------------------------
|
| 382 |
+
# Tools (API)
|
| 383 |
+
# ---------------------------
|
| 384 |
+
|
| 385 |
+
@app.tool()
def remember(text: str, meta: dict | None = None) -> dict:
    """Append a raw note to short-term memory (STM)."""
    store = _load()
    now = int(time.time())
    entry = {"t": now, "text": text, "meta": meta or {}}
    entry["id"] = f"stm-{now}-{len(store.get('stm', []))}"
    store["stm"].append(entry)
    _save(store)
    return {"ok": True, "stm_size": len(store["stm"]), "id": entry["id"]}
|
| 393 |
+
|
| 394 |
+
@app.tool()
def remember_event(event: dict, promote: bool = True) -> dict:
    """
    Store a structured event in STM and optionally promote it.

    Args:
        event: dict with optional keys ts/role/text/emotion/salience/
               user_pinned/task_boundary; missing ts/role/text are filled.
        promote: when True, salient events (salience >= 0.45, user-pinned,
                 or task boundaries) also become episodes.
    Returns: {"ok", "salience", "id", "sizes": {stm, episodes, facts}}
    (Fixed: removed an unused `aff_conf` local from the original.)
    """
    store = _load()
    ev = dict(event or {})
    ev.setdefault("ts", int(time.time()))
    ev.setdefault("role", "user")
    ev.setdefault("text", "")
    if "salience" not in ev:
        recent_texts = [it.get("text", "") for it in store.get("stm", [])[-10:]]
        ev["salience"] = compute_salience(ev, recent_texts)
    stm_item = {
        "id": f"stm-{ev['ts']}-{len(store.get('stm', []))}",
        "t": ev["ts"],
        "text": ev.get("text", ""),
        "event": ev,
    }
    store["stm"].append(stm_item)
    if promote:
        # Promote salient / pinned / boundary events to the episode tier.
        if ev["salience"] >= 0.45 or ev.get("user_pinned") or ev.get("task_boundary"):
            store["episodes"].append(make_episode(ev, ev["salience"]))
    _save(store)
    return {"ok": True, "salience": ev["salience"], "id": stm_item["id"],
            "sizes": {"stm": len(store["stm"]), "episodes": len(store["episodes"]),
                      "facts": len(store["facts"])}}
|
| 419 |
+
|
| 420 |
+
@app.tool()
def recall(k: int = 3) -> dict:
    """Return the last *k* STM items (most recent last)."""
    return {"items": _load().get("stm", [])[-k:]}
|
| 425 |
+
|
| 426 |
+
@app.tool()
def recall_episodes(k: int = 5, topic: str | None = None) -> dict:
    """Return the last *k* episodes, optionally filtered by topic."""
    episodes = _load().get("episodes", [])
    if topic:
        episodes = [ep for ep in episodes if topic in (ep.get("topics") or [])]
    return {"items": episodes[-k:]}
|
| 433 |
+
|
| 434 |
+
@app.tool()
def recall_facts() -> dict:
    """Return every synthesized fact."""
    return {"facts": _load().get("facts", [])}
|
| 438 |
+
|
| 439 |
+
@app.tool()
def reflect() -> dict:
    """Synthesize durable facts from accumulated episodes.

    Clusters episodes by topic, derives one candidate fact per topic,
    and merges each candidate with any existing fact stating the same
    proposition (keeping the higher support/confidence, refreshing its
    timestamp). Newly derived facts are appended.
    Returns {"ok", "updated": <new fact count>, "facts": [...]}.
    """
    store = _load()
    eps = store.get("episodes", [])
    if not eps:
        return {"ok": True, "updated": 0, "facts": store.get("facts", [])}
    buckets = cluster_topics(eps)
    new_facts = []
    for topic, group in buckets.items():
        fact = synthesize_fact(topic, group)
        if fact:
            # Merge in place with an existing fact of the same proposition.
            existing = next((f for f in store["facts"] if f.get("proposition") == fact["proposition"]), None)
            if existing:
                existing["support"] = max(existing.get("support", 0), fact["support"])
                existing["confidence"] = round(max(existing.get("confidence", 0.0), fact["confidence"]), 2)
                existing["last_updated"] = int(time.time())
            else:
                new_facts.append(fact)
    store["facts"].extend(new_facts)
    _save(store)
    return {"ok": True, "updated": len(new_facts), "facts": store["facts"]}
|
| 460 |
+
|
| 461 |
+
@app.tool()
def prune(before_ts: int | None = None) -> dict:
    """Trim STM: drop items older than *before_ts*, or the oldest 75%."""
    store = _load()
    stm = store.get("stm", [])
    if before_ts:
        stm = [entry for entry in stm if entry.get("t", 0) >= int(before_ts)]
    else:
        # No cutoff given: keep only the newest quarter.
        keep_from = int(len(stm) * 0.75)
        stm = stm[keep_from:]
    store["stm"] = stm
    _save(store)
    return {"ok": True, "stm_size": len(store["stm"])}
|
| 473 |
+
|
| 474 |
+
# -------- NEW: search / get / delete / list --------
|
| 475 |
+
|
| 476 |
+
@app.tool()
def search(query: str, tier: str | None = None, k: int = 5) -> dict:
    """
    TF-IDF search across memory tiers.
    Args:
        query: text to search
        tier: "stm", "episodes", "facts", or None for all tiers
        k: maximum number of results
    Returns: {"results": [{"id","tier","text","ts","score","matched"}]}
    """
    store = _load()
    ranked = _tfidf_rank(query, _collect_docs(store, tier=tier), k=k)
    results = []
    for doc_id, tier_name, text, ts, score, matched in ranked:
        results.append({
            "id": doc_id,
            "tier": tier_name,
            "text": text,
            "ts": ts,
            "score": round(score, 3),
            "matched": matched,
        })
    return {"results": results}
|
| 492 |
+
|
| 493 |
+
@app.tool()
def get(item_id: str) -> dict:
    """Fetch one item by id from any tier; {"tier": None} when absent."""
    store = _load()
    lookups = (
        ("stm", "id", store.get("stm", [])),
        ("episodes", "episode_id", store.get("episodes", [])),
        ("facts", "fact_id", store.get("facts", [])),
    )
    for tier_name, key, items in lookups:
        for entry in items:
            if entry.get(key) == item_id:
                return {"tier": tier_name, "item": entry}
    return {"tier": None, "item": None}
|
| 509 |
+
|
| 510 |
+
@app.tool()
def delete_by_id(item_id: str, tier: str | None = None) -> dict:
    """
    Delete a single item by id. Searches all tiers when *tier* is None.
    Returns {"ok": bool, "removed_from": <tier>|None}.
    """
    store = _load()
    removed_from = None
    # Each tier stores its id under a different key.
    plans = (("stm", "id"), ("episodes", "episode_id"), ("facts", "fact_id"))
    for tier_name, key in plans:
        if removed_from is not None or tier not in (None, tier_name):
            continue
        kept = [entry for entry in store[tier_name] if entry.get(key) != item_id]
        if len(kept) != len(store[tier_name]):
            removed_from = tier_name
        store[tier_name] = kept
    if removed_from:
        _save(store)
        return {"ok": True, "removed_from": removed_from}
    return {"ok": False, "removed_from": None}
|
| 534 |
+
|
| 535 |
+
@app.tool()
def list_items(tier: str, k: int = 10) -> dict:
    """List the last *k* items of one tier ("stm", "episodes", "facts")."""
    store = _load()
    if tier in ("stm", "episodes", "facts"):
        return {"items": store.get(tier, [])[-k:]}
    return {"items": []}
|
| 549 |
+
|
| 550 |
+
# -------- Diagnostics --------
|
| 551 |
+
|
| 552 |
+
@app.tool()
def stats() -> dict:
    """Item counts per tier plus store-file metadata."""
    store = _load()
    meta = store.get("meta", {})
    return {
        "stm": len(store.get("stm", [])),
        "episodes": len(store.get("episodes", [])),
        "facts": len(store.get("facts", [])),
        "file": FILE,
        "created": meta.get("created"),
        "version": meta.get("version", "1.3.0"),
    }
|
| 563 |
+
|
| 564 |
+
@app.tool()
def health() -> dict:
    """Health probe: tier sizes on success, error string on failure."""
    try:
        store = _load()
    except Exception as exc:
        return {"status": "error", "error": str(exc), "time": time.time()}
    return {
        "status": "ok",
        "stm": len(store.get("stm", [])),
        "episodes": len(store.get("episodes", [])),
        "facts": len(store.get("facts", [])),
        "time": time.time(),
        "version": "1.3.0",
    }
|
| 571 |
+
|
| 572 |
+
@app.tool()
def version() -> dict:
    """Server identity, version, available tiers, and backing file."""
    tiers = ["stm", "episodes", "facts"]
    return {"name": "memory-server", "version": "1.3.0", "tiers": tiers, "file": FILE}
|
| 575 |
+
|
| 576 |
+
@app.tool()
def get_emotion_arc(k: int = 10) -> dict:
    """
    Emotion trajectory (arc) over the last *k* events.
    Returns: {"trajectory": [...],
              "direction": "escalating|de-escalating|volatile|stable",
              "summary": "<label> → <label> → ..."}
    """
    store = _load()
    trajectory, direction = get_emotion_trajectory(store, k=k)
    if not trajectory:
        return {"trajectory": [], "direction": "unknown", "summary": "No emotion history"}
    # Human-readable arc built from (at most) the last five labels.
    labels = [point["primary_label"] for point in trajectory]
    summary = " → ".join(labels[-5:]) if len(labels) >= 5 else " → ".join(labels)
    return {"trajectory": trajectory, "direction": direction, "summary": summary}
|
| 597 |
+
|
| 598 |
+
# Entry point: run the memory MCP server over the stdio transport.
if __name__ == "__main__":
    app.run()  # serves MCP over stdio
|
servers/memory_server.py.bak
ADDED
|
@@ -0,0 +1,570 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# # servers/memory_server.py
|
| 2 |
+
# from fastmcp import FastMCP, tool
|
| 3 |
+
# import json, os, time
|
| 4 |
+
|
| 5 |
+
# app = FastMCP("memory-server")
|
| 6 |
+
# FILE = os.environ.get("GM_MEMORY_FILE", "memory.json")
|
| 7 |
+
|
| 8 |
+
# def _load():
|
| 9 |
+
# if os.path.exists(FILE):
|
| 10 |
+
# with open(FILE) as f: return json.load(f)
|
| 11 |
+
# return []
|
| 12 |
+
|
| 13 |
+
# def _save(history):
|
| 14 |
+
# with open(FILE, "w") as f: json.dump(history[-50:], f) # keep up to 50
|
| 15 |
+
|
| 16 |
+
# @tool
|
| 17 |
+
# def remember(text: str, meta: dict | None = None) -> dict:
|
| 18 |
+
# """
|
| 19 |
+
# Append an entry to memory.
|
| 20 |
+
# Args:
|
| 21 |
+
# text: str - content to store
|
| 22 |
+
# meta: dict - optional info like {"tone":"gentle","labels":["sad"]}
|
| 23 |
+
# Returns: {"ok": True, "size": <n>}
|
| 24 |
+
# """
|
| 25 |
+
# data = _load()
|
| 26 |
+
# data.append({"t": int(time.time()), "text": text, "meta": meta or {}})
|
| 27 |
+
# _save(data)
|
| 28 |
+
# return {"ok": True, "size": len(data)}
|
| 29 |
+
|
| 30 |
+
# @tool
|
| 31 |
+
# def recall(k: int = 3) -> dict:
|
| 32 |
+
# """
|
| 33 |
+
# Return last k entries from memory (most recent last).
|
| 34 |
+
# Args:
|
| 35 |
+
# k: int - how many items
|
| 36 |
+
# Returns: {"items":[...]}
|
| 37 |
+
# """
|
| 38 |
+
# data = _load()
|
| 39 |
+
# return {"items": data[-k:]}
|
| 40 |
+
|
| 41 |
+
# if __name__ == "__main__":
|
| 42 |
+
# app.run()
|
| 43 |
+
# servers/memory_server.py
|
| 44 |
+
# servers/memory_server.py
|
| 45 |
+
from __future__ import annotations
|
| 46 |
+
# ---- FastMCP import shim (works across versions) ----
|
| 47 |
+
# Ensures: FastMCP is imported and `@tool` is ALWAYS a callable decorator.
|
| 48 |
+
from typing import Callable, Any
|
| 49 |
+
|
| 50 |
+
try:
|
| 51 |
+
from fastmcp import FastMCP # present across versions
|
| 52 |
+
except Exception as e:
|
| 53 |
+
raise ImportError(f"FastMCP missing: {e}")
|
| 54 |
+
|
| 55 |
+
_tool_candidate: Any = None
|
| 56 |
+
# Try common locations
|
| 57 |
+
try:
|
| 58 |
+
from fastmcp import tool as _tool_candidate # newer API: function
|
| 59 |
+
except Exception:
|
| 60 |
+
try:
|
| 61 |
+
from fastmcp.tools import tool as _tool_candidate # older API: function
|
| 62 |
+
except Exception:
|
| 63 |
+
_tool_candidate = None
|
| 64 |
+
|
| 65 |
+
# If we somehow got a module instead of a function, try attribute
|
| 66 |
+
if _tool_candidate is not None and not callable(_tool_candidate):
|
| 67 |
+
try:
|
| 68 |
+
_tool_candidate = _tool_candidate.tool # some builds expose module.tools.tool
|
| 69 |
+
except Exception:
|
| 70 |
+
_tool_candidate = None
|
| 71 |
+
|
| 72 |
+
def tool(*dargs, **dkwargs):
    """
    Wrapper that behaves correctly in both usages:
        @tool
        @tool(...)
    If real decorator exists, delegate. Otherwise:
      - If called as @tool (i.e., first arg is fn), return fn (no-op).
      - If called as @tool(...), return a decorator that returns fn (no-op).
    """
    # Delegate to whatever decorator the import shim located, if any.
    if callable(_tool_candidate):
        return _tool_candidate(*dargs, **dkwargs)

    # No real decorator available — provide no-op behavior.
    if dargs and callable(dargs[0]) and not dkwargs:
        # Used as bare @tool: the single positional arg is the function itself.
        fn = dargs[0]
        return fn

    # Used as @tool(...): hand back a pass-through decorator.
    def _noop_decorator(fn):
        return fn
    return _noop_decorator
|
| 94 |
+
# ---- end shim ----
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
import json, os, time, math, re
|
| 98 |
+
from typing import Dict, List, Optional, Any, Tuple
|
| 99 |
+
from collections import Counter
|
| 100 |
+
|
| 101 |
+
app = FastMCP("memory-server")
|
| 102 |
+
|
| 103 |
+
# ---------------------------
|
| 104 |
+
# Storage & limits
|
| 105 |
+
# ---------------------------
|
| 106 |
+
FILE = os.environ.get("GM_MEMORY_FILE", "memory.json")
|
| 107 |
+
STM_MAX = int(os.environ.get("GM_STM_MAX", "120"))
|
| 108 |
+
EP_MAX = int(os.environ.get("GM_EPISODES_MAX", "240"))
|
| 109 |
+
FACT_MAX= int(os.environ.get("GM_FACTS_MAX", "200"))
|
| 110 |
+
# ---------------------------
|
| 111 |
+
# Emotion Drift Analysis
|
| 112 |
+
# ---------------------------
|
| 113 |
+
def compute_emotional_direction(trajectory: List[Dict[str, Any]]) -> str:
    """
    Classify an emotion trajectory as escalating, de-escalating, volatile or
    stable, using valence/arousal drift over the last five samples.

    trajectory: list of {"label": str, "valence": float, "arousal": float, "ts": int}
    """
    if len(trajectory) < 2:
        return "stable"

    window = trajectory[-5:]
    valence_series = [sample.get("valence", 0.0) for sample in window]
    arousal_series = [sample.get("arousal", 0.5) for sample in window]

    # Net drift from the oldest to newest sample in the window.
    valence_delta = valence_series[-1] - valence_series[0]
    arousal_delta = arousal_series[-1] - arousal_series[0]

    rising_and_negative = arousal_delta > 0.15 and valence_delta < -0.1
    falling_and_positive = arousal_delta < -0.15 and valence_delta > 0.1
    wide_arousal_swing = (max(arousal_series) - min(arousal_series)) > 0.3

    if rising_and_negative:
        return "escalating"      # more activated AND more negative
    if falling_and_positive:
        return "de-escalating"   # calming down AND more positive
    if wide_arousal_swing:
        return "volatile"        # wide swings in arousal
    return "stable"
|
| 139 |
+
|
| 140 |
+
def get_emotion_trajectory(store: Dict[str, Any], k: int = 10) -> Tuple[List[Dict[str, Any]], str]:
    """
    Collect the last k emotion-bearing STM events and classify their drift.

    Returns (trajectory, direction); each trajectory entry carries the primary
    label, valence, arousal, timestamp, and a short text snippet.
    """
    points: List[Dict[str, Any]] = []
    for entry in store.get("stm", [])[-k:]:
        ev = entry.get("event", {})
        emo = ev.get("emotion", {})
        # Only events carrying at least one emotion label contribute to the arc.
        if not (emo and emo.get("labels")):
            continue
        points.append({
            "label": (emo.get("labels") or ["neutral"])[0],
            "valence": float(emo.get("valence", 0.0)),
            "arousal": float(emo.get("arousal", 0.5)),
            "ts": ev.get("ts", int(time.time())),
            "text": ev.get("text", "")[:50],  # snippet only, not the full text
        })

    return points, compute_emotional_direction(points)
|
| 161 |
+
# ---------------------------
|
| 162 |
+
# File helpers & migrations
|
| 163 |
+
# ---------------------------
|
| 164 |
+
def _default_store() -> Dict[str, Any]:
|
| 165 |
+
return {"stm": [], "episodes": [], "facts": [], "meta": {"created": int(time.time()), "version": "1.3.0"}}
|
| 166 |
+
|
| 167 |
+
def _load() -> Dict[str, Any]:
|
| 168 |
+
if os.path.exists(FILE):
|
| 169 |
+
with open(FILE) as f:
|
| 170 |
+
try:
|
| 171 |
+
data = json.load(f)
|
| 172 |
+
# migrate flat list → tiered
|
| 173 |
+
if isinstance(data, list):
|
| 174 |
+
data = {"stm": data[-STM_MAX:], "episodes": [], "facts": [], "meta": {"created": int(time.time()), "version": "1.3.0"}}
|
| 175 |
+
# backfill ids in stm
|
| 176 |
+
changed = False
|
| 177 |
+
for i, it in enumerate(data.get("stm", [])):
|
| 178 |
+
if "id" not in it:
|
| 179 |
+
it["id"] = f"stm-{it.get('t', int(time.time()))}-{i}"
|
| 180 |
+
changed = True
|
| 181 |
+
if changed:
|
| 182 |
+
_save(data)
|
| 183 |
+
return data
|
| 184 |
+
except Exception:
|
| 185 |
+
return _default_store()
|
| 186 |
+
return _default_store()
|
| 187 |
+
|
| 188 |
+
def _save(store: Dict[str, Any]) -> None:
|
| 189 |
+
store["stm"] = store.get("stm", [])[-STM_MAX:]
|
| 190 |
+
store["episodes"] = store.get("episodes", [])[-EP_MAX:]
|
| 191 |
+
store["facts"] = store.get("facts", [])[-FACT_MAX:]
|
| 192 |
+
with open(FILE, "w") as f:
|
| 193 |
+
json.dump(store, f, ensure_ascii=False)
|
| 194 |
+
|
| 195 |
+
# ---------------------------
|
| 196 |
+
# Salience & decay (same as before)
|
| 197 |
+
# ---------------------------
|
| 198 |
+
def time_decay(ts: float, now: Optional[float] = None, half_life_hours: float = 72.0) -> float:
    """
    Exponential half-life weight for an event at timestamp ts.

    Weight is 1.0 at ts == now and halves every half_life_hours; timestamps in
    the future clamp to zero elapsed time (weight 1.0).
    """
    # NOTE: `now or time.time()` intentionally treats a falsy now (0) as unset,
    # matching the original semantics.
    current = now or time.time()
    elapsed_hours = max(0.0, (current - ts) / 3600.0)
    return 0.5 ** (elapsed_hours / half_life_hours)
|
| 202 |
+
|
| 203 |
+
_WORD = re.compile(r"[a-zA-Z']+")
|
| 204 |
+
|
| 205 |
+
def keyword_set(text: str) -> set:
|
| 206 |
+
return set(w.lower() for w in _WORD.findall(text or "") if len(w) > 2)
|
| 207 |
+
|
| 208 |
+
def novelty_score(text: str, recent_texts: List[str], k: int = 10) -> float:
    """
    1.0 minus the best Jaccard keyword overlap with the last k recent texts.

    1.0 = completely novel; 0.0 = duplicate, empty, or keyword-free input.
    """
    if not text:
        return 0.0
    current = keyword_set(text)
    if not current:
        return 0.0

    prior_sets = [keyword_set(t) for t in recent_texts[-k:] if t]
    if not prior_sets:
        return 1.0  # nothing to compare against: maximally novel

    best_overlap = 0.0
    for prior in prior_sets:
        union_size = len(current | prior) or 1  # guard against div-by-zero
        best_overlap = max(best_overlap, len(current & prior) / union_size)
    return max(0.0, 1.0 - best_overlap)
|
| 224 |
+
|
| 225 |
+
def compute_salience(ev: Dict[str, Any], recent_texts: List[str]) -> float:
    """
    Score how memorable an event is, in [0.0, 1.0].

    Blends emotional intensity, keyword novelty vs. recent texts, explicit
    user pinning, task boundaries, and sincerity (0-100 scale, normalized).
    """
    labels = ev.get("emotion", {}).get("labels") or []  # NOTE(review): unused below — kept for parity
    conf = float(ev.get("emotion", {}).get("confidence") or 0.0)
    valence= float(ev.get("emotion", {}).get("valence") or 0.0)
    arousal= float(ev.get("emotion", {}).get("arousal") or 0.5)
    sinc = float(ev.get("sincerity") or 0.0) / 100.0  # sincerity presumably 0-100 — confirm upstream
    text = ev.get("text", "")

    # Affect term: |valence| scaled up by arousal, gated by model confidence.
    affect = abs(valence) * (0.7 + 0.3 * arousal) * conf
    nov = novelty_score(text, recent_texts)
    user_flag = 1.0 if ev.get("user_pinned") else 0.0
    boundary = 1.0 if ev.get("task_boundary") else 0.0

    # NOTE(review): weights sum to 1.10, so the clamp below can engage; looks
    # intentional (pinned+affective events saturate) — confirm before changing.
    sal = 0.45 * affect + 0.25 * nov + 0.18 * user_flag + 0.12 * boundary + 0.10 * sinc
    return round(max(0.0, min(1.0, sal)), 3)
|
| 240 |
+
|
| 241 |
+
# ---------------------------
|
| 242 |
+
# Episode & fact synthesis
|
| 243 |
+
# ---------------------------
|
| 244 |
+
def make_episode(ev: Dict[str, Any], salience: float) -> Dict[str, Any]:
    """Build an episode record from a raw event plus its computed salience."""
    emotion = ev.get("emotion", {})
    labels = emotion.get("labels") or []
    timestamp = ev.get("ts") or int(time.time())
    text = ev.get("text")

    return {
        "episode_id": ev.get("id") or f"ep-{int(time.time()*1000)}",
        "ts_start": timestamp,
        "ts_end": timestamp,
        # Prefer an explicit summary; fall back to the first 140 chars of text.
        "summary": ev.get("summary") or (text[:140] if text else ""),
        "topics": list(set(labels)) or ["misc"],
        "emotion_peak": (labels or ["neutral"])[0],
        "emotion_conf": float(emotion.get("confidence") or 0.0),
        "tone": emotion.get("tone") or "neutral",
        "salience": float(salience),
        "provenance_event": ev.get("id"),
    }
|
| 258 |
+
|
| 259 |
+
def cluster_topics(episodes: List[Dict[str, Any]]) -> Dict[str, List[Dict[str, Any]]]:
    """Group episodes by topic; an episode with no topics lands under 'misc'."""
    grouped: Dict[str, List[Dict[str, Any]]] = {}
    for episode in episodes:
        for topic in episode.get("topics") or ["misc"]:
            grouped.setdefault(topic, []).append(episode)
    return grouped
|
| 265 |
+
|
| 266 |
+
def synthesize_fact(topic: str, eps: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """
    Distill a tone-preference fact from one topic's episodes.

    Returns None unless support >= 3 and blended confidence >= 0.6; the
    blended confidence averages episode salience and emotion confidence.
    """
    if not eps:
        return None

    support = len(eps)
    mean_salience = sum(e.get("salience", 0.0) for e in eps) / max(1, support)
    mean_emotion_conf = sum(e.get("emotion_conf", 0.0) for e in eps) / max(1, support)
    confidence = max(0.0, min(1.0, 0.5 * mean_salience + 0.5 * mean_emotion_conf))
    if support < 3 or confidence < 0.6:
        return None

    tone_counts: Dict[str, int] = {}
    for e in eps:
        tone = e.get("tone", "neutral")
        tone_counts[tone] = tone_counts.get(tone, 0) + 1
    # First-inserted tone wins ties (same result as a stable descending sort).
    top_tone = max(tone_counts.items(), key=lambda kv: kv[1])[0]

    return {
        "fact_id": f"fact-{topic}-{int(time.time())}",
        "proposition": f"Prefers {top_tone} tone for topic '{topic}'",
        "support": support,
        "confidence": round(confidence, 2),
        "last_updated": int(time.time()),
        "topics": [topic],
        "provenance_episode_ids": [e["episode_id"] for e in eps],
    }
|
| 287 |
+
|
| 288 |
+
# ---------------------------
|
| 289 |
+
# ID helpers
|
| 290 |
+
# ---------------------------
|
| 291 |
+
def _ensure_stm_id(item: Dict[str, Any], idx: int) -> Dict[str, Any]:
|
| 292 |
+
if "id" not in item:
|
| 293 |
+
item["id"] = f"stm-{item.get('t', int(time.time()))}-{idx}"
|
| 294 |
+
return item
|
| 295 |
+
|
| 296 |
+
def _stm_text(item: Dict[str, Any]) -> str:
|
| 297 |
+
if "text" in item and isinstance(item["text"], str):
|
| 298 |
+
return item["text"]
|
| 299 |
+
return (item.get("event") or {}).get("text", "") or ""
|
| 300 |
+
|
| 301 |
+
def _collect_docs(store: Dict[str, Any], tier: Optional[str] = None) -> List[Tuple[str,str,str,int]]:
|
| 302 |
+
"""
|
| 303 |
+
Returns list of (id, tier, text, ts)
|
| 304 |
+
"""
|
| 305 |
+
docs: List[Tuple[str,str,str,int]] = []
|
| 306 |
+
if tier in (None, "stm"):
|
| 307 |
+
for i, it in enumerate(store.get("stm", [])):
|
| 308 |
+
it = _ensure_stm_id(it, i)
|
| 309 |
+
docs.append((it["id"], "stm", _stm_text(it), int(it.get("t", time.time()))))
|
| 310 |
+
if tier in (None, "episodes"):
|
| 311 |
+
for ep in store.get("episodes", []):
|
| 312 |
+
docs.append((ep.get("episode_id",""), "episodes", ep.get("summary",""), int(ep.get("ts_end", time.time()))))
|
| 313 |
+
if tier in (None, "facts"):
|
| 314 |
+
for f in store.get("facts", []):
|
| 315 |
+
docs.append((f.get("fact_id",""), "facts", f.get("proposition",""), int(f.get("last_updated", time.time()))))
|
| 316 |
+
return [d for d in docs if d[0] and d[2]]
|
| 317 |
+
|
| 318 |
+
# ---------------------------
|
| 319 |
+
# Simple TF-IDF search
|
| 320 |
+
# ---------------------------
|
| 321 |
+
def _tfidf_rank(query: str, docs: List[Tuple[str,str,str,int]], k: int = 5):
|
| 322 |
+
q_terms = [w for w in keyword_set(query)]
|
| 323 |
+
if not q_terms or not docs:
|
| 324 |
+
return []
|
| 325 |
+
# DF
|
| 326 |
+
df = Counter()
|
| 327 |
+
doc_terms = {}
|
| 328 |
+
for _id, _tier, text, _ts in docs:
|
| 329 |
+
terms = [w for w in keyword_set(text)]
|
| 330 |
+
doc_terms[_id] = terms
|
| 331 |
+
for t in set(terms):
|
| 332 |
+
df[t] += 1
|
| 333 |
+
N = len(docs)
|
| 334 |
+
idf = {t: math.log((N + 1) / (df[t] + 1)) + 1.0 for t in df}
|
| 335 |
+
# Score
|
| 336 |
+
scored = []
|
| 337 |
+
qset = set(q_terms)
|
| 338 |
+
for _id, _tier, text, _ts in docs:
|
| 339 |
+
terms = doc_terms[_id]
|
| 340 |
+
tf = Counter(terms)
|
| 341 |
+
score = 0.0
|
| 342 |
+
matched = []
|
| 343 |
+
for t in q_terms:
|
| 344 |
+
if tf[t] > 0:
|
| 345 |
+
score += tf[t] * idf.get(t, 1.0)
|
| 346 |
+
matched.append(t)
|
| 347 |
+
if score > 0:
|
| 348 |
+
scored.append((_id, _tier, text, _ts, score, matched))
|
| 349 |
+
scored.sort(key=lambda x: (-x[4], -x[3])) # score desc, then recent
|
| 350 |
+
return scored[:k]
|
| 351 |
+
|
| 352 |
+
# ---------------------------
|
| 353 |
+
# Tools (API)
|
| 354 |
+
# ---------------------------
|
| 355 |
+
|
| 356 |
+
@tool
|
| 357 |
+
def remember(text: str, meta: dict | None = None) -> dict:
|
| 358 |
+
store = _load()
|
| 359 |
+
item = {"t": int(time.time()), "text": text, "meta": meta or {}}
|
| 360 |
+
item["id"] = f"stm-{item['t']}-{len(store.get('stm', []))}"
|
| 361 |
+
store["stm"].append(item)
|
| 362 |
+
_save(store)
|
| 363 |
+
return {"ok": True, "stm_size": len(store["stm"]), "id": item["id"]}
|
| 364 |
+
|
| 365 |
+
@tool
|
| 366 |
+
def remember_event(event: dict, promote: bool = True) -> dict:
|
| 367 |
+
store = _load()
|
| 368 |
+
ev = dict(event or {})
|
| 369 |
+
ev.setdefault("ts", int(time.time()))
|
| 370 |
+
ev.setdefault("role", "user")
|
| 371 |
+
ev.setdefault("text", "")
|
| 372 |
+
if "salience" not in ev:
|
| 373 |
+
recent_texts = [it.get("text","") for it in store.get("stm", [])[-10:]]
|
| 374 |
+
ev["salience"] = compute_salience(ev, recent_texts)
|
| 375 |
+
stm_item = {
|
| 376 |
+
"id": f"stm-{ev['ts']}-{len(store.get('stm', []))}",
|
| 377 |
+
"t": ev["ts"],
|
| 378 |
+
"text": ev.get("text",""),
|
| 379 |
+
"event": ev
|
| 380 |
+
}
|
| 381 |
+
store["stm"].append(stm_item)
|
| 382 |
+
if promote:
|
| 383 |
+
aff_conf = float(ev.get("emotion", {}).get("confidence") or 0.0)
|
| 384 |
+
if ev["salience"] >= 0.45 or ev.get("user_pinned") or ev.get("task_boundary"):
|
| 385 |
+
ep = make_episode(ev, ev["salience"])
|
| 386 |
+
store["episodes"].append(ep)
|
| 387 |
+
_save(store)
|
| 388 |
+
return {"ok": True, "salience": ev["salience"], "id": stm_item["id"],
|
| 389 |
+
"sizes": {"stm": len(store["stm"]), "episodes": len(store["episodes"]), "facts": len(store["facts"])}}
|
| 390 |
+
|
| 391 |
+
@tool
|
| 392 |
+
def recall(k: int = 3) -> dict:
|
| 393 |
+
store = _load()
|
| 394 |
+
items = store.get("stm", [])[-k:]
|
| 395 |
+
return {"items": items}
|
| 396 |
+
|
| 397 |
+
@tool
|
| 398 |
+
def recall_episodes(k: int = 5, topic: str | None = None) -> dict:
|
| 399 |
+
store = _load()
|
| 400 |
+
eps = store.get("episodes", [])
|
| 401 |
+
if topic:
|
| 402 |
+
eps = [e for e in eps if topic in (e.get("topics") or [])]
|
| 403 |
+
return {"items": eps[-k:]}
|
| 404 |
+
|
| 405 |
+
@tool
|
| 406 |
+
def recall_facts() -> dict:
|
| 407 |
+
store = _load()
|
| 408 |
+
return {"facts": store.get("facts", [])}
|
| 409 |
+
|
| 410 |
+
@tool
|
| 411 |
+
def reflect() -> dict:
|
| 412 |
+
store = _load()
|
| 413 |
+
eps = store.get("episodes", [])
|
| 414 |
+
if not eps:
|
| 415 |
+
return {"ok": True, "updated": 0, "facts": store.get("facts", [])}
|
| 416 |
+
buckets = cluster_topics(eps)
|
| 417 |
+
new_facts = []
|
| 418 |
+
for topic, group in buckets.items():
|
| 419 |
+
fact = synthesize_fact(topic, group)
|
| 420 |
+
if fact:
|
| 421 |
+
existing = next((f for f in store["facts"] if f.get("proposition") == fact["proposition"]), None)
|
| 422 |
+
if existing:
|
| 423 |
+
existing["support"] = max(existing.get("support", 0), fact["support"])
|
| 424 |
+
existing["confidence"] = round(max(existing.get("confidence", 0.0), fact["confidence"]), 2)
|
| 425 |
+
existing["last_updated"] = int(time.time())
|
| 426 |
+
else:
|
| 427 |
+
new_facts.append(fact)
|
| 428 |
+
store["facts"].extend(new_facts)
|
| 429 |
+
_save(store)
|
| 430 |
+
return {"ok": True, "updated": len(new_facts), "facts": store["facts"]}
|
| 431 |
+
|
| 432 |
+
@tool
|
| 433 |
+
def prune(before_ts: int | None = None) -> dict:
|
| 434 |
+
store = _load()
|
| 435 |
+
stm = store.get("stm", [])
|
| 436 |
+
if before_ts:
|
| 437 |
+
stm = [it for it in stm if it.get("t", 0) >= int(before_ts)]
|
| 438 |
+
else:
|
| 439 |
+
cut = int(len(stm) * 0.75)
|
| 440 |
+
stm = stm[cut:]
|
| 441 |
+
store["stm"] = stm
|
| 442 |
+
_save(store)
|
| 443 |
+
return {"ok": True, "stm_size": len(store["stm"])}
|
| 444 |
+
|
| 445 |
+
# -------- NEW: search / get / delete / list --------
|
| 446 |
+
|
| 447 |
+
@tool
|
| 448 |
+
def search(query: str, tier: str | None = None, k: int = 5) -> dict:
|
| 449 |
+
"""
|
| 450 |
+
TF-IDF search across memory.
|
| 451 |
+
Args:
|
| 452 |
+
query: text to search
|
| 453 |
+
tier: one of {"stm","episodes","facts"} or None for all
|
| 454 |
+
k: number of results
|
| 455 |
+
Returns: {"results":[{"id","tier","text","ts","score","matched"}]}
|
| 456 |
+
"""
|
| 457 |
+
store = _load()
|
| 458 |
+
docs = _collect_docs(store, tier=tier)
|
| 459 |
+
ranked = _tfidf_rank(query, docs, k=k)
|
| 460 |
+
results = [{"id": _id, "tier": _tier, "text": text, "ts": ts, "score": round(score,3), "matched": matched}
|
| 461 |
+
for (_id, _tier, text, ts, score, matched) in ranked]
|
| 462 |
+
return {"results": results}
|
| 463 |
+
|
| 464 |
+
@tool
|
| 465 |
+
def get(item_id: str) -> dict:
|
| 466 |
+
"""
|
| 467 |
+
Fetch a single item by id from any tier.
|
| 468 |
+
"""
|
| 469 |
+
s = _load()
|
| 470 |
+
for it in s.get("stm", []):
|
| 471 |
+
if it.get("id") == item_id:
|
| 472 |
+
return {"tier": "stm", "item": it}
|
| 473 |
+
for ep in s.get("episodes", []):
|
| 474 |
+
if ep.get("episode_id") == item_id:
|
| 475 |
+
return {"tier": "episodes", "item": ep}
|
| 476 |
+
for f in s.get("facts", []):
|
| 477 |
+
if f.get("fact_id") == item_id:
|
| 478 |
+
return {"tier": "facts", "item": f}
|
| 479 |
+
return {"tier": None, "item": None}
|
| 480 |
+
|
| 481 |
+
@tool
|
| 482 |
+
def delete_by_id(item_id: str, tier: str | None = None) -> dict:
|
| 483 |
+
"""
|
| 484 |
+
Delete a single item by id. If tier is None, searches all tiers.
|
| 485 |
+
Returns {"ok": bool, "removed_from": <tier>|None}
|
| 486 |
+
"""
|
| 487 |
+
s = _load()
|
| 488 |
+
removed_from = None
|
| 489 |
+
if tier in (None, "stm"):
|
| 490 |
+
before = len(s["stm"])
|
| 491 |
+
s["stm"] = [it for it in s["stm"] if it.get("id") != item_id]
|
| 492 |
+
if len(s["stm"]) != before: removed_from = "stm"
|
| 493 |
+
if not removed_from and tier in (None, "episodes"):
|
| 494 |
+
before = len(s["episodes"])
|
| 495 |
+
s["episodes"] = [e for e in s["episodes"] if e.get("episode_id") != item_id]
|
| 496 |
+
if len(s["episodes"]) != before: removed_from = "episodes"
|
| 497 |
+
if not removed_from and tier in (None, "facts"):
|
| 498 |
+
before = len(s["facts"])
|
| 499 |
+
s["facts"] = [f for f in s["facts"] if f.get("fact_id") != item_id]
|
| 500 |
+
if len(s["facts"]) != before: removed_from = "facts"
|
| 501 |
+
if removed_from:
|
| 502 |
+
_save(s)
|
| 503 |
+
return {"ok": True, "removed_from": removed_from}
|
| 504 |
+
return {"ok": False, "removed_from": None}
|
| 505 |
+
|
| 506 |
+
@tool
|
| 507 |
+
def list_items(tier: str, k: int = 10) -> dict:
|
| 508 |
+
"""
|
| 509 |
+
List last k items in a tier.
|
| 510 |
+
tier ∈ {"stm","episodes","facts"}
|
| 511 |
+
"""
|
| 512 |
+
s = _load()
|
| 513 |
+
if tier == "stm":
|
| 514 |
+
return {"items": s.get("stm", [])[-k:]}
|
| 515 |
+
if tier == "episodes":
|
| 516 |
+
return {"items": s.get("episodes", [])[-k:]}
|
| 517 |
+
if tier == "facts":
|
| 518 |
+
return {"items": s.get("facts", [])[-k:]}
|
| 519 |
+
return {"items": []}
|
| 520 |
+
|
| 521 |
+
# -------- Diagnostics --------
|
| 522 |
+
|
| 523 |
+
@tool
|
| 524 |
+
def stats() -> dict:
|
| 525 |
+
s = _load()
|
| 526 |
+
return {
|
| 527 |
+
"stm": len(s.get("stm", [])),
|
| 528 |
+
"episodes": len(s.get("episodes", [])),
|
| 529 |
+
"facts": len(s.get("facts", [])),
|
| 530 |
+
"file": FILE,
|
| 531 |
+
"created": s.get("meta", {}).get("created"),
|
| 532 |
+
"version": s.get("meta", {}).get("version", "1.3.0"),
|
| 533 |
+
}
|
| 534 |
+
|
| 535 |
+
@tool
|
| 536 |
+
def health() -> dict:
|
| 537 |
+
try:
|
| 538 |
+
s = _load()
|
| 539 |
+
return {"status": "ok", "stm": len(s.get("stm", [])), "episodes": len(s.get("episodes", [])), "facts": len(s.get("facts", [])), "time": time.time(), "version": "1.3.0"}
|
| 540 |
+
except Exception as e:
|
| 541 |
+
return {"status": "error", "error": str(e), "time": time.time()}
|
| 542 |
+
|
| 543 |
+
@tool
|
| 544 |
+
def version() -> dict:
|
| 545 |
+
return {"name": "memory-server", "version": "1.3.0", "tiers": ["stm","episodes","facts"], "file": FILE}
|
| 546 |
+
|
| 547 |
+
@tool
|
| 548 |
+
def get_emotion_arc(k: int = 10) -> dict:
|
| 549 |
+
"""
|
| 550 |
+
Get the emotion trajectory (arc) for the last k events.
|
| 551 |
+
Returns: {"trajectory": [...], "direction": "escalating|de-escalating|volatile|stable", "summary": str}
|
| 552 |
+
"""
|
| 553 |
+
store = _load()
|
| 554 |
+
trajectory, direction = get_emotion_trajectory(store, k=k)
|
| 555 |
+
|
| 556 |
+
if not trajectory:
|
| 557 |
+
return {"trajectory": [], "direction": "unknown", "summary": "No emotion history"}
|
| 558 |
+
|
| 559 |
+
# Create readable summary
|
| 560 |
+
emotions = [t["label"] for t in trajectory]
|
| 561 |
+
summary = " → ".join(emotions[-5:]) if len(emotions) >= 5 else " → ".join(emotions)
|
| 562 |
+
|
| 563 |
+
return {
|
| 564 |
+
"trajectory": trajectory,
|
| 565 |
+
"direction": direction,
|
| 566 |
+
"summary": summary
|
| 567 |
+
}
|
| 568 |
+
|
| 569 |
+
if __name__ == "__main__":
|
| 570 |
+
app.run() # serves MCP over stdio
|
servers/reflection_server.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# servers/reflection_server.py
|
| 2 |
+
from fastmcp import FastMCP
|
| 3 |
+
from typing import List, Dict, Any, Optional
|
| 4 |
+
import os
|
| 5 |
+
|
| 6 |
+
# Optional: load .env if you want this server runnable standalone
|
| 7 |
+
try:
|
| 8 |
+
from dotenv import load_dotenv
|
| 9 |
+
load_dotenv()
|
| 10 |
+
except Exception:
|
| 11 |
+
pass
|
| 12 |
+
|
| 13 |
+
app = FastMCP("reflection-server")
|
| 14 |
+
|
| 15 |
+
_SYSTEM_BASE = (
|
| 16 |
+
"You are Ghost Malone — a calm, humorous listener. "
|
| 17 |
+
"Be sincere, brief (<80 words), and reflective. "
|
| 18 |
+
"If the user seems distressed, be gentle and grounding."
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
def _system_prompt(tone: Optional[str], emotion_arc: Optional[dict] = None) -> str:
|
| 22 |
+
base = (
|
| 23 |
+
"You are Ghost Malone — a calm, reflective listener. "
|
| 24 |
+
"Keep responses under 60 words. "
|
| 25 |
+
"FIRST: Mirror what they're feeling (name the emotion, reflect their experience). "
|
| 26 |
+
"THEN: Validate it simply. "
|
| 27 |
+
"ONLY IF NEEDED: Ask a gentle question or offer a small anchor—never jump to solutions. "
|
| 28 |
+
"Use natural language, not therapy-speak."
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
tone_map = {
|
| 32 |
+
"gentle": "Be soft and grounding.",
|
| 33 |
+
"calming": "Be steady and reassuring.",
|
| 34 |
+
"light": "Be warm and light.",
|
| 35 |
+
"neutral": "Be warm and present.",
|
| 36 |
+
}
|
| 37 |
+
tone_hint = tone_map.get(tone.lower() if tone else "", "Be warm and present.")
|
| 38 |
+
|
| 39 |
+
# Add emotional trajectory awareness
|
| 40 |
+
arc_hint = ""
|
| 41 |
+
if emotion_arc and emotion_arc.get("trajectory"):
|
| 42 |
+
direction = emotion_arc.get("direction", "stable")
|
| 43 |
+
if direction == "escalating":
|
| 44 |
+
arc_hint = " They're escalating—mirror deeply, validate, ground gently."
|
| 45 |
+
elif direction == "de-escalating":
|
| 46 |
+
arc_hint = " They're calming—acknowledge the shift, reinforce it."
|
| 47 |
+
elif direction == "volatile":
|
| 48 |
+
arc_hint = " Emotions are shifting—be a steady anchor."
|
| 49 |
+
|
| 50 |
+
return f"{base} {tone_hint}{arc_hint}"
|
| 51 |
+
|
| 52 |
+
def _to_claude_messages(context: Optional[List[Dict[str, str]]], user_text: str, tone: Optional[str]):
|
| 53 |
+
"""Convert to Claude message format (system separate, no duplicate system messages)."""
|
| 54 |
+
msgs: List[Dict[str, str]] = []
|
| 55 |
+
if context:
|
| 56 |
+
# Expecting list of {"role": "...", "content": "..."} dicts
|
| 57 |
+
for m in context[-8:]: # last few turns
|
| 58 |
+
role = m.get("role", "user")
|
| 59 |
+
if role == "system":
|
| 60 |
+
continue # Claude doesn't support system in message list
|
| 61 |
+
content = m.get("content", "")
|
| 62 |
+
msgs.append({"role": role, "content": content})
|
| 63 |
+
msgs.append({"role": "user", "content": user_text})
|
| 64 |
+
return msgs
|
| 65 |
+
|
| 66 |
+
@app.tool()
def generate(
    text: str,
    context: Optional[List[Dict[str, str]]] = None,
    tone: Optional[str] = None,
    emotion_arc: Optional[dict] = None,
    model: str = "claude-sonnet-4-5",
    max_tokens: int = 200,
) -> Dict[str, Any]:
    """
    Generate a Ghost Malone reply using Claude.
    Args:
        text: user message
        context: prior messages as [{"role":"user|assistant|system", "content":"..."}]
        tone: optional tone hint: 'gentle'|'calming'|'light'|'neutral'
        emotion_arc: optional emotion trajectory {"trajectory":[...], "direction": str}
        model: Claude model id (default claude-sonnet-4-5)
        max_tokens: output length cap
    Returns: {"reply": "...", "model": model, "tone": tone}
    """
    api_key = os.getenv("ANTHROPIC_API_KEY")

    # No key: return a canned dev reply instead of failing, so the pipeline
    # stays usable in local development.
    if not api_key:
        return {"reply": f"👻 (dev-reflection) I hear you: {text}", "model": "dev", "tone": tone or "neutral"}

    try:
        # Imported lazily so the server can start without the anthropic package.
        from anthropic import Anthropic
        client = Anthropic(api_key=api_key)
        system_prompt = _system_prompt(tone, emotion_arc)
        messages = _to_claude_messages(context, text, tone)

        resp = client.messages.create(
            model=model,
            max_tokens=max_tokens,
            system=system_prompt,
            messages=messages
        )
        reply = resp.content[0].text
        return {"reply": reply, "model": model, "tone": tone or "neutral"}
    except Exception as e:
        # Degrade gracefully: surface the error inside the reply rather than
        # raising across the MCP boundary.
        return {"reply": f"👻 (reflection error) {e}\nI still hear you: {text}", "model": model, "tone": tone or "neutral"}
|
| 107 |
+
# Entry point: serve the tools as an MCP server when run directly.
if __name__ == "__main__":
    app.run()  # MCP over stdio
|
utils/__pycache__/intervention_lexicon.cpython-312.pyc
ADDED
|
Binary file (6.92 kB). View file
|
|
|
utils/__pycache__/mcp_client.cpython-312.pyc
ADDED
|
Binary file (4.52 kB). View file
|
|
|
utils/__pycache__/needs_lexicon.cpython-312.pyc
ADDED
|
Binary file (19 kB). View file
|
|
|
utils/__pycache__/orchestrator.cpython-312.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
utils/intervention_lexicon.py
ADDED
|
@@ -0,0 +1,243 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Intervention Lexicon: Need-based actionable suggestions
|
| 3 |
+
Maps psychological needs to concrete, evidence-based interventions.
|
| 4 |
+
SIMPLIFIED VERSION - Focus on architecture, not content
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import List, Dict
|
| 8 |
+
|
| 9 |
+
# Simple intervention strategies for each psychological need.
# Schema per need: label (display name), icon (UI emoji), strategies — each
# strategy has an action (imperative suggestion), prompt (reflective question),
# context (comma+space-separated tags matched by get_interventions), and
# evidence (research basis shown for credibility).
INTERVENTIONS = {
    "autonomy": {
        "label": "Autonomy/Control",
        "icon": "⚖️",
        "strategies": [
            {
                "action": "Set a boundary",
                "prompt": "What would it sound like to say 'no' here?",
                "context": "blocked, powerless, micromanaged",
                "evidence": "Self-Determination Theory",
            },
            {
                "action": "Reclaim one decision",
                "prompt": "What's one choice that's yours to make right now?",
                "context": "dismissed, underutilized",
                "evidence": "Locus of control research",
            },
            {
                "action": "Name what you need",
                "prompt": "If you could ask for one thing to change, what would it be?",
                "context": "powerless, micromanaged",
                "evidence": "Assertiveness training",
            },
        ],
    },
    "connection": {
        "label": "Connection/Belonging",
        "icon": "🗣️",
        "strategies": [
            {
                "action": "Reach out to someone you trust",
                "prompt": "Who's someone who usually gets you?",
                "context": "isolated, rejected, misunderstood",
                "evidence": "Social support research",
            },
            {
                "action": "Share what you're feeling",
                "prompt": "What would you want someone to know about how this feels?",
                "context": "misunderstood, disconnected",
                "evidence": "Emotional disclosure benefits",
            },
            {
                "action": "Remember past connection",
                "prompt": "When did connection feel strong? What made that different?",
                "context": "isolated, rejected",
                "evidence": "Positive psychology",
            },
        ],
    },
    "security": {
        "label": "Security/Clarity",
        "icon": "🛡️",
        "strategies": [
            {
                "action": "Get more information",
                "prompt": "What's one question that would help you feel more grounded?",
                "context": "uncertain, unstable",
                "evidence": "Information-seeking coping",
            },
            {
                "action": "Make a backup plan",
                "prompt": "If the worst happened, what would you do next?",
                "context": "threatened, unstable",
                "evidence": "CBT anxiety reduction",
            },
            {
                "action": "Identify what you can control",
                "prompt": "What's one thing that's yours to decide or influence?",
                "context": "uncertain, threatened",
                "evidence": "Control theory",
            },
        ],
    },
    "rest": {
        "label": "Rest/Boundaries",
        "icon": "🛌",
        "strategies": [
            {
                "action": "Say 'no' to something",
                "prompt": "What's one thing you could decline or delegate?",
                "context": "overwhelmed, overextended",
                "evidence": "Boundary research",
            },
            {
                "action": "Take a real break",
                "prompt": "What would help you actually rest, even for 10 minutes?",
                "context": "exhausted, overwhelmed",
                "evidence": "Recovery research",
            },
            {
                "action": "Lower one expectation",
                "prompt": "What's one thing that could be 'good enough' instead of perfect?",
                "context": "overwhelmed, boundary_violated",
                "evidence": "Perfectionism research",
            },
        ],
    },
    "recognition": {
        "label": "Recognition/Value",
        "icon": "✨",
        "strategies": [
            {
                "action": "Acknowledge yourself",
                "prompt": "What did you actually accomplish today, even if small?",
                "context": "unappreciated, inadequate",
                "evidence": "Self-compassion research",
            },
            {
                "action": "Share your work",
                "prompt": "Who could you show what you've done?",
                "context": "overlooked, unappreciated",
                "evidence": "Recognition at work research",
            },
            {
                "action": "Ask for feedback",
                "prompt": "What would help you know if you're on the right track?",
                "context": "inadequate, overlooked",
                "evidence": "Feedback-seeking behavior",
            },
        ],
    },
}
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def get_interventions(
    need_type: str, contexts: List[str] = None, limit: int = 3
) -> List[Dict]:
    """
    Return up to `limit` intervention strategies for a detected need.

    When `contexts` is provided, strategies whose context tags overlap the
    user's contexts rank first (the stable sort preserves lexicon order for
    ties); otherwise the first `limit` strategies are returned as-is.
    Unknown need types yield an empty list.
    """
    need = INTERVENTIONS.get(need_type)
    if need is None:
        return []

    strategies = need["strategies"]
    if not contexts:
        # No context signal — fall back to the lexicon's default ordering.
        return strategies[:limit]

    wanted = set(contexts)

    def overlap(strategy: Dict) -> int:
        # How many of the user's contexts this strategy's tag list covers.
        return len(set(strategy["context"].split(", ")) & wanted)

    ranked = sorted(strategies, key=overlap, reverse=True)
    return ranked[:limit]
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def should_show_interventions(
    confidence: float,
    message_count: int,
    emotional_intensity: float = None,
    min_messages: int = 2,
    min_confidence: float = 0.70,
    min_arousal: float = 0.40,
) -> bool:
    """
    Decide whether to surface interventions (simplified demo gating).

    All thresholds must be met: enough conversation history, a sufficiently
    confident need detection, and — when arousal is known — enough emotional
    intensity. An unknown intensity (None) does not block display.

    Args:
        confidence: Confidence in the detected need (0-1).
        message_count: Number of messages in the conversation so far.
        emotional_intensity: Arousal level (0-1), or None if unavailable.
        min_messages: History threshold (default: 2).
        min_confidence: Confidence threshold (default: 0.70).
        min_arousal: Arousal threshold (default: 0.40).

    Returns:
        True if interventions should be displayed.
    """
    enough_history = message_count >= min_messages
    confident_enough = confidence >= min_confidence
    aroused_enough = emotional_intensity is None or emotional_intensity >= min_arousal
    return enough_history and confident_enough and aroused_enough
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def format_interventions(
    need_type: str,
    interventions: List[Dict],
    confidence: float,
    emotion_arc: Dict = None,
    user_text: str = None,
) -> str:
    """
    Render detected-need interventions as display text for the UI.

    Produces a divider, a need header (icon + label), and one bullet per
    intervention with its action and reflective prompt. `confidence`,
    `emotion_arc` and `user_text` are accepted for interface compatibility
    (personalization hooks) but are not used here. Returns "" when there is
    nothing to show.
    """
    if not interventions:
        return ""

    meta = INTERVENTIONS.get(need_type, {})
    icon = meta.get("icon", "💡")
    label = meta.get("label", need_type)

    parts = [
        "\n\n---\n",
        f"💡 Based on your need for {icon} {label}:\n",
    ]
    for item in interventions:
        parts.append(f"\n• {item['action']}")
        parts.append(f" ↳ {item['prompt']}")

    return "".join(parts)
|
utils/intervention_lexicon.py.broken
ADDED
|
@@ -0,0 +1,397 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Intervention Lexicon: Need-based actionable suggestions
|
| 3 |
+
Maps psychological needs to concrete, evidence-based interventions.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from typing import List, Dict
|
| 7 |
+
|
| 8 |
+
# Intervention strategies for each need
|
| 9 |
+
INTERVENTIONS = {
|
| 10 |
+
"autonomy": {
|
| 11 |
+
"label": "Autonomy/Control",
|
| 12 |
+
"icon": "⚖️",
|
| 13 |
+
"strategies": [
|
| 14 |
+
{
|
| 15 |
+
"action": "Set a boundary",
|
| 16 |
+
"prompt": "What would it sound like to say 'no' here?",
|
| 17 |
+
"context": "blocked, powerless, micromanaged",
|
| 18 |
+
"evidence": "Self-Determination Theory: autonomy is core psychological need",
|
| 19 |
+
},
|
| 20 |
+
{
|
| 21 |
+
"action": "Reclaim decision-making",
|
| 22 |
+
"prompt": "What's one choice that's yours to make right now?",
|
| 23 |
+
"context": "dismissed, underutilized",
|
| 24 |
+
"evidence": "Locus of control research",
|
| 25 |
+
},
|
| 26 |
+
{
|
| 27 |
+
"action": "Name what you need",
|
| 28 |
+
"prompt": "If you could ask for one thing to change, what would it be?",
|
| 29 |
+
"context": "powerless, micromanaged",
|
| 30 |
+
"evidence": "Assertiveness training principles",
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"action": "Take one small action",
|
| 34 |
+
"prompt": "What's the smallest step you could take that's fully in your control?",
|
| 35 |
+
"context": "blocked, powerless",
|
| 36 |
+
"evidence": "Behavioral activation therapy",
|
| 37 |
+
},
|
| 38 |
+
],
|
| 39 |
+
},
|
| 40 |
+
"connection": {
|
| 41 |
+
"label": "Connection/Belonging",
|
| 42 |
+
"icon": "🗣️",
|
| 43 |
+
"strategies": [
|
| 44 |
+
{
|
| 45 |
+
"action": "Text someone who's seen you at your worst",
|
| 46 |
+
"prompt": "Not the person you perform for - who actually knows your mess?",
|
| 47 |
+
"context": "isolated, rejected, misunderstood",
|
| 48 |
+
"evidence": "Social support research, attachment theory",
|
| 49 |
+
},
|
| 50 |
+
{
|
| 51 |
+
"action": "Voice memo instead of typing",
|
| 52 |
+
"prompt": "Sometimes hearing your own voice say it out loud hits different",
|
| 53 |
+
"context": "isolated, disconnected",
|
| 54 |
+
"evidence": "Expressive writing research, self-disclosure benefits",
|
| 55 |
+
},
|
| 56 |
+
{
|
| 57 |
+
"action": "Do your usual connection ritual",
|
| 58 |
+
"prompt": "Coffee? Walk? Game? Your body remembers what works even when your brain doesn't",
|
| 59 |
+
"context": "isolated, rejected",
|
| 60 |
+
"evidence": "Behavioral activation, habit-based connection",
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"action": "Show up somewhere people know your face",
|
| 64 |
+
"prompt": "Even if you don't talk much - being seen counts",
|
| 65 |
+
"context": "isolated, disconnected",
|
| 66 |
+
"evidence": "Social presence research, third places theory",
|
| 67 |
+
},
|
| 68 |
+
{
|
| 69 |
+
"action": "Send the messy text",
|
| 70 |
+
"prompt": "Not the cleaned-up version - the real one. Someone can handle it.",
|
| 71 |
+
"context": "misunderstood, rejected",
|
| 72 |
+
"evidence": "Vulnerability research (Brené Brown), authentic relating",
|
| 73 |
+
},
|
| 74 |
+
],
|
| 75 |
+
},
|
| 76 |
+
"security": {
|
| 77 |
+
"label": "Security/Clarity",
|
| 78 |
+
"icon": "🛡️",
|
| 79 |
+
"context": "isolated, disconnected",
|
| 80 |
+
"evidence": "Loneliness intervention research",
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"action": "Ask for what you need",
|
| 84 |
+
"prompt": "What would it sound like to say 'I need you to...'?",
|
| 85 |
+
"context": "rejected, misunderstood",
|
| 86 |
+
"evidence": "Relationship research (Gottman)",
|
| 87 |
+
},
|
| 88 |
+
],
|
| 89 |
+
},
|
| 90 |
+
"security": {
|
| 91 |
+
"label": "Security/Clarity",
|
| 92 |
+
"icon": "🛡️",
|
| 93 |
+
"strategies": [
|
| 94 |
+
{
|
| 95 |
+
"action": "Get more information",
|
| 96 |
+
"prompt": "What's one question that would help you feel more grounded?",
|
| 97 |
+
"context": "uncertain, unstable",
|
| 98 |
+
"evidence": "Information-seeking coping (Lazarus & Folkman)",
|
| 99 |
+
},
|
| 100 |
+
{
|
| 101 |
+
"action": "Make a backup plan",
|
| 102 |
+
"prompt": "If the worst happened, what would you do next?",
|
| 103 |
+
"context": "threatened, unstable, loss",
|
| 104 |
+
"evidence": "Cognitive-behavioral therapy, anxiety reduction",
|
| 105 |
+
},
|
| 106 |
+
{
|
| 107 |
+
"action": "Identify what you can control",
|
| 108 |
+
"prompt": "What's one thing that's yours to decide or influence?",
|
| 109 |
+
"context": "uncertain, threatened",
|
| 110 |
+
"evidence": "Control theory, stress management",
|
| 111 |
+
},
|
| 112 |
+
{
|
| 113 |
+
"action": "Ground in the present",
|
| 114 |
+
"prompt": "What's actually true right now, in this moment?",
|
| 115 |
+
"context": "threatened, uncertain",
|
| 116 |
+
"evidence": "Mindfulness-based interventions",
|
| 117 |
+
},
|
| 118 |
+
{
|
| 119 |
+
"action": "Reach out for support",
|
| 120 |
+
"prompt": "Who could you talk to about this uncertainty?",
|
| 121 |
+
"context": "threatened, uncertain, loss",
|
| 122 |
+
"evidence": "Social support buffering effect",
|
| 123 |
+
},
|
| 124 |
+
],
|
| 125 |
+
},
|
| 126 |
+
"rest": {
|
| 127 |
+
"label": "Rest/Boundaries",
|
| 128 |
+
"icon": "🛌",
|
| 129 |
+
"strategies": [
|
| 130 |
+
{
|
| 131 |
+
"action": "Say 'no' to something",
|
| 132 |
+
"prompt": "What's one thing you could decline or delay?",
|
| 133 |
+
"context": "overwhelmed, exhausted, overextended",
|
| 134 |
+
"evidence": "Burnout prevention research",
|
| 135 |
+
},
|
| 136 |
+
{
|
| 137 |
+
"action": "Ask for help",
|
| 138 |
+
"prompt": "What's one thing someone else could take off your plate?",
|
| 139 |
+
"context": "overwhelmed, exhausted",
|
| 140 |
+
"evidence": "Social support, task delegation",
|
| 141 |
+
},
|
| 142 |
+
{
|
| 143 |
+
"action": "Set a boundary",
|
| 144 |
+
"prompt": "What boundary do you need to protect your energy?",
|
| 145 |
+
"context": "boundary_violated, overextended",
|
| 146 |
+
"evidence": "Boundary research, relationship psychology",
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"action": "Take a real break",
|
| 150 |
+
"prompt": "When could you actually stop for 15 minutes today?",
|
| 151 |
+
"context": "exhausted, overwhelmed",
|
| 152 |
+
"evidence": "Recovery research, rest benefits",
|
| 153 |
+
},
|
| 154 |
+
{
|
| 155 |
+
"action": "Name what's draining you",
|
| 156 |
+
"prompt": "What's taking the most from you right now?",
|
| 157 |
+
"context": "exhausted, overextended",
|
| 158 |
+
"evidence": "Energy management, self-awareness",
|
| 159 |
+
},
|
| 160 |
+
],
|
| 161 |
+
},
|
| 162 |
+
"recognition": {
|
| 163 |
+
"label": "Recognition/Value",
|
| 164 |
+
"icon": "✨",
|
| 165 |
+
"strategies": [
|
| 166 |
+
{
|
| 167 |
+
"action": "Acknowledge yourself",
|
| 168 |
+
"prompt": "What did you do well here, even if no one noticed?",
|
| 169 |
+
"context": "unappreciated, overlooked, inadequate",
|
| 170 |
+
"evidence": "Self-compassion research (Neff)",
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"action": "Ask for feedback",
|
| 174 |
+
"prompt": "Who could you ask: 'How did you see that?'",
|
| 175 |
+
"context": "unappreciated, inadequate",
|
| 176 |
+
"evidence": "Feedback-seeking behavior research",
|
| 177 |
+
},
|
| 178 |
+
{
|
| 179 |
+
"action": "Share your contribution",
|
| 180 |
+
"prompt": "What's one thing you could make visible?",
|
| 181 |
+
"context": "overlooked, unappreciated",
|
| 182 |
+
"evidence": "Visibility in the workplace research",
|
| 183 |
+
},
|
| 184 |
+
{
|
| 185 |
+
"action": "Separate their view from your worth",
|
| 186 |
+
"prompt": "What do you know about your value, regardless of their reaction?",
|
| 187 |
+
"context": "criticized, inadequate",
|
| 188 |
+
"evidence": "Cognitive restructuring (CBT)",
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"action": "Find recognition elsewhere",
|
| 192 |
+
"prompt": "Where else do people see your value?",
|
| 193 |
+
"context": "unappreciated, overlooked",
|
| 194 |
+
"evidence": "Multiple life domains, resilience research",
|
| 195 |
+
},
|
| 196 |
+
],
|
| 197 |
+
},
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def get_interventions(
|
| 202 |
+
need_type: str, contexts: List[str] = None, limit: int = 3
|
| 203 |
+
) -> List[Dict]:
|
| 204 |
+
"""
|
| 205 |
+
Get contextually appropriate interventions for a detected need.
|
| 206 |
+
|
| 207 |
+
Args:
|
| 208 |
+
need_type: One of autonomy, connection, security, rest, recognition
|
| 209 |
+
contexts: List of detected contexts (e.g., ["isolated", "rejected"])
|
| 210 |
+
limit: Maximum number of interventions to return
|
| 211 |
+
|
| 212 |
+
Returns:
|
| 213 |
+
List of intervention dicts with action, prompt, context, evidence
|
| 214 |
+
"""
|
| 215 |
+
if need_type not in INTERVENTIONS:
|
| 216 |
+
return []
|
| 217 |
+
|
| 218 |
+
strategies = INTERVENTIONS[need_type]["strategies"]
|
| 219 |
+
|
| 220 |
+
# If contexts provided, prioritize matching interventions
|
| 221 |
+
if contexts:
|
| 222 |
+
scored_strategies = []
|
| 223 |
+
for strategy in strategies:
|
| 224 |
+
# Count how many user contexts match this strategy's contexts
|
| 225 |
+
strategy_contexts = set(strategy["context"].split(", "))
|
| 226 |
+
user_contexts = set(contexts)
|
| 227 |
+
matches = len(strategy_contexts & user_contexts)
|
| 228 |
+
scored_strategies.append((matches, strategy))
|
| 229 |
+
|
| 230 |
+
# Sort by match count (descending), then take top N
|
| 231 |
+
scored_strategies.sort(key=lambda x: x[0], reverse=True)
|
| 232 |
+
return [s[1] for s in scored_strategies[:limit]]
|
| 233 |
+
|
| 234 |
+
# No contexts - return first N strategies
|
| 235 |
+
return strategies[:limit]
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def should_show_interventions(
|
| 239 |
+
confidence: float,
|
| 240 |
+
message_count: int,
|
| 241 |
+
emotional_intensity: float = None,
|
| 242 |
+
min_messages: int = 2, # Simplified: just wait one exchange
|
| 243 |
+
min_confidence: float = 0.70, # Simplified: lower bar
|
| 244 |
+
min_arousal: float = 0.40, # Simplified: catch more cases
|
| 245 |
+
) -> bool:
|
| 246 |
+
"""
|
| 247 |
+
SIMPLIFIED for demo: Show interventions when there's a clear need.
|
| 248 |
+
|
| 249 |
+
Args:
|
| 250 |
+
confidence: Confidence in the detected need (0-1)
|
| 251 |
+
message_count: Number of messages in conversation
|
| 252 |
+
emotional_intensity: Arousal level (0-1)
|
| 253 |
+
min_messages: Minimum messages (default: 2)
|
| 254 |
+
min_confidence: Minimum confidence (default: 0.70)
|
| 255 |
+
min_arousal: Minimum arousal (default: 0.40)
|
| 256 |
+
|
| 257 |
+
Returns:
|
| 258 |
+
True if interventions should be displayed
|
| 259 |
+
"""
|
| 260 |
+
# Simple checks - if there's a strong need and emotions, show help
|
| 261 |
+
if message_count < min_messages:
|
| 262 |
+
return False
|
| 263 |
+
|
| 264 |
+
if confidence < min_confidence:
|
| 265 |
+
return False
|
| 266 |
+
|
| 267 |
+
if emotional_intensity is not None and emotional_intensity < min_arousal:
|
| 268 |
+
return False
|
| 269 |
+
|
| 270 |
+
return True
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def format_interventions(
|
| 274 |
+
need_type: str,
|
| 275 |
+
interventions: List[Dict],
|
| 276 |
+
confidence: float,
|
| 277 |
+
emotion_arc: Dict = None,
|
| 278 |
+
user_text: str = None,
|
| 279 |
+
) -> str:
|
| 280 |
+
"""
|
| 281 |
+
Format interventions for display in the UI.
|
| 282 |
+
Uses memory/emotion arc to personalize suggestions when available.
|
| 283 |
+
|
| 284 |
+
Args:
|
| 285 |
+
need_type: The detected need type
|
| 286 |
+
interventions: List of intervention dicts
|
| 287 |
+
confidence: Confidence in the need detection
|
| 288 |
+
emotion_arc: Optional emotion trajectory from memory
|
| 289 |
+
user_text: Optional current user message for context extraction
|
| 290 |
+
|
| 291 |
+
Returns:
|
| 292 |
+
Formatted markdown string with personalized interventions
|
| 293 |
+
"""
|
| 294 |
+
if not interventions:
|
| 295 |
+
return ""
|
| 296 |
+
|
| 297 |
+
need_info = INTERVENTIONS[need_type]
|
| 298 |
+
icon = need_info["icon"]
|
| 299 |
+
label = need_info["label"]
|
| 300 |
+
|
| 301 |
+
output = f"\n\n---\n\n"
|
| 302 |
+
output += f"💡 **Based on your need for {icon} {label}:**\n\n"
|
| 303 |
+
|
| 304 |
+
# Extract names/places from current text for personalization
|
| 305 |
+
entities = _extract_entities(user_text) if user_text else {}
|
| 306 |
+
|
| 307 |
+
# Check if there's a pattern in emotion arc
|
| 308 |
+
pattern_insight = _analyze_emotion_pattern(emotion_arc) if emotion_arc else None
|
| 309 |
+
|
| 310 |
+
for i, intervention in enumerate(interventions):
|
| 311 |
+
# Personalize the prompt if we have context
|
| 312 |
+
prompt = intervention["prompt"]
|
| 313 |
+
|
| 314 |
+
# Add memory-based personalization
|
| 315 |
+
if pattern_insight and i == 0: # First intervention gets pattern insight
|
| 316 |
+
output += f"• **{intervention['action']}** \n"
|
| 317 |
+
output += f" ↳ *{pattern_insight}*\n\n"
|
| 318 |
+
elif entities.get("person") and "reach out" in intervention["action"].lower():
|
| 319 |
+
# Personalize "reach out" with mentioned person
|
| 320 |
+
person = entities["person"][0]
|
| 321 |
+
output += f"• **{intervention['action']}** \n"
|
| 322 |
+
output += f" ↳ *Could {person} be someone to talk to about this?*\n\n"
|
| 323 |
+
elif entities.get("place") and "connection" in need_type:
|
| 324 |
+
# Reference places where user felt better
|
| 325 |
+
place = entities["place"][0]
|
| 326 |
+
output += f"• **{intervention['action']}** \n"
|
| 327 |
+
output += f" ↳ *What about {place} helped you feel more connected?*\n\n"
|
| 328 |
+
else:
|
| 329 |
+
# Use default prompt
|
| 330 |
+
output += f"• **{intervention['action']}** \n"
|
| 331 |
+
output += f" ↳ *{prompt}*\n\n"
|
| 332 |
+
|
| 333 |
+
output += "*These are just ideas — only you know what fits right now.*"
|
| 334 |
+
|
| 335 |
+
return output
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def _extract_entities(text: str) -> Dict[str, List[str]]:
|
| 339 |
+
"""
|
| 340 |
+
Simple entity extraction - finds names, places mentioned.
|
| 341 |
+
(Could be enhanced with NER later, but regex works for now)
|
| 342 |
+
"""
|
| 343 |
+
import re
|
| 344 |
+
|
| 345 |
+
entities = {"person": [], "place": []}
|
| 346 |
+
|
| 347 |
+
if not text:
|
| 348 |
+
return entities
|
| 349 |
+
|
| 350 |
+
# Look for capitalized words that might be names
|
| 351 |
+
# Pattern: capitalized word not at start of sentence
|
| 352 |
+
words = text.split()
|
| 353 |
+
for i, word in enumerate(words):
|
| 354 |
+
# Skip first word and common words
|
| 355 |
+
if i == 0 or word.lower() in ["i", "the", "a", "an", "my", "her", "his"]:
|
| 356 |
+
continue
|
| 357 |
+
# Check if capitalized and looks like a name
|
| 358 |
+
if word[0].isupper() and len(word) > 2:
|
| 359 |
+
entities["person"].append(word.rstrip(".,!?"))
|
| 360 |
+
|
| 361 |
+
return entities
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def _analyze_emotion_pattern(emotion_arc: Dict) -> str:
|
| 365 |
+
"""
|
| 366 |
+
Analyze emotion trajectory for patterns and insights.
|
| 367 |
+
Returns a personalized prompt based on what we've seen.
|
| 368 |
+
"""
|
| 369 |
+
trajectory = emotion_arc.get("trajectory", [])
|
| 370 |
+
|
| 371 |
+
if not trajectory or len(trajectory) < 2:
|
| 372 |
+
return None
|
| 373 |
+
|
| 374 |
+
# Check if emotions are getting worse
|
| 375 |
+
recent_valence = [e.get("valence", 0) for e in trajectory[-3:]]
|
| 376 |
+
if len(recent_valence) >= 2:
|
| 377 |
+
if all(
|
| 378 |
+
recent_valence[i] < recent_valence[i - 1]
|
| 379 |
+
for i in range(1, len(recent_valence))
|
| 380 |
+
):
|
| 381 |
+
return "Your emotions have been getting heavier - what could help you shift this pattern?"
|
| 382 |
+
|
| 383 |
+
# Check if stuck in same emotion
|
| 384 |
+
recent_labels = [e.get("primary_label") for e in trajectory[-3:]]
|
| 385 |
+
if len(set(recent_labels)) == 1:
|
| 386 |
+
emotion = recent_labels[0]
|
| 387 |
+
if emotion in ["anxious", "sad"]:
|
| 388 |
+
return f"You've been feeling {emotion} for a while now - what's one thing that might give you a break from this?"
|
| 389 |
+
|
| 390 |
+
# Check if there was a better moment
|
| 391 |
+
best_valence = max(e.get("valence", -1) for e in trajectory)
|
| 392 |
+
if best_valence > 0:
|
| 393 |
+
best_moment = [e for e in trajectory if e.get("valence") == best_valence][0]
|
| 394 |
+
snippet = best_moment.get("text", "")[:50]
|
| 395 |
+
return f'Earlier when you said "{snippet}..." you seemed lighter - what was different then?'
|
| 396 |
+
|
| 397 |
+
return None
|
utils/intervention_lexicon.py.broken2
ADDED
|
@@ -0,0 +1,397 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Intervention Lexicon: Need-based actionable suggestions
|
| 3 |
+
Maps psychological needs to concrete, evidence-based interventions.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from typing import Dict, List, Optional
|
| 7 |
+
|
| 8 |
+
# Intervention strategies for each need
|
| 9 |
+
# Intervention strategies for each need.
#
# BUG FIX: the previous literal was not valid Python - it contained a
# duplicated "security" key whose first occurrence was malformed (a stray
# context/evidence pair at dict level plus an orphaned strategy dict),
# which made the module unimportable. The recovered "Ask for what you
# need" strategy has been folded back into "connection" (its contexts are
# connection contexts) and the single well-formed "security" entry kept.
INTERVENTIONS = {
    "autonomy": {
        "label": "Autonomy/Control",
        "icon": "⚖️",
        "strategies": [
            {
                "action": "Set a boundary",
                "prompt": "What would it sound like to say 'no' here?",
                "context": "blocked, powerless, micromanaged",
                "evidence": "Self-Determination Theory: autonomy is core psychological need",
            },
            {
                "action": "Reclaim decision-making",
                "prompt": "What's one choice that's yours to make right now?",
                "context": "dismissed, underutilized",
                "evidence": "Locus of control research",
            },
            {
                "action": "Name what you need",
                "prompt": "If you could ask for one thing to change, what would it be?",
                "context": "powerless, micromanaged",
                "evidence": "Assertiveness training principles",
            },
            {
                "action": "Take one small action",
                "prompt": "What's the smallest step you could take that's fully in your control?",
                "context": "blocked, powerless",
                "evidence": "Behavioral activation therapy",
            },
        ],
    },
    "connection": {
        "label": "Connection/Belonging",
        "icon": "🗣️",
        "strategies": [
            {
                "action": "Text someone who's seen you at your worst",
                "prompt": "Not the person you perform for - who actually knows your mess?",
                "context": "isolated, rejected, misunderstood",
                "evidence": "Social support research, attachment theory",
            },
            {
                "action": "Voice memo instead of typing",
                "prompt": "Sometimes hearing your own voice say it out loud hits different",
                "context": "isolated, disconnected",
                "evidence": "Expressive writing research, self-disclosure benefits",
            },
            {
                "action": "Do your usual connection ritual",
                "prompt": "Coffee? Walk? Game? Your body remembers what works even when your brain doesn't",
                "context": "isolated, rejected",
                "evidence": "Behavioral activation, habit-based connection",
            },
            {
                "action": "Show up somewhere people know your face",
                "prompt": "Even if you don't talk much - being seen counts",
                "context": "isolated, disconnected",
                "evidence": "Social presence research, third places theory",
            },
            {
                "action": "Send the messy text",
                "prompt": "Not the cleaned-up version - the real one. Someone can handle it.",
                "context": "misunderstood, rejected",
                "evidence": "Vulnerability research (Brené Brown), authentic relating",
            },
            {
                # Recovered from the malformed region of the original literal.
                "action": "Ask for what you need",
                "prompt": "What would it sound like to say 'I need you to...'?",
                "context": "rejected, misunderstood",
                "evidence": "Relationship research (Gottman)",
            },
        ],
    },
    "security": {
        "label": "Security/Clarity",
        "icon": "🛡️",
        "strategies": [
            {
                "action": "Get more information",
                "prompt": "What's one question that would help you feel more grounded?",
                "context": "uncertain, unstable",
                "evidence": "Information-seeking coping (Lazarus & Folkman)",
            },
            {
                "action": "Make a backup plan",
                "prompt": "If the worst happened, what would you do next?",
                "context": "threatened, unstable, loss",
                "evidence": "Cognitive-behavioral therapy, anxiety reduction",
            },
            {
                "action": "Identify what you can control",
                "prompt": "What's one thing that's yours to decide or influence?",
                "context": "uncertain, threatened",
                "evidence": "Control theory, stress management",
            },
            {
                "action": "Ground in the present",
                "prompt": "What's actually true right now, in this moment?",
                "context": "threatened, uncertain",
                "evidence": "Mindfulness-based interventions",
            },
            {
                "action": "Reach out for support",
                "prompt": "Who could you talk to about this uncertainty?",
                "context": "threatened, uncertain, loss",
                "evidence": "Social support buffering effect",
            },
        ],
    },
    "rest": {
        "label": "Rest/Boundaries",
        "icon": "🛌",
        "strategies": [
            {
                "action": "Say 'no' to something",
                "prompt": "What's one thing you could decline or delay?",
                "context": "overwhelmed, exhausted, overextended",
                "evidence": "Burnout prevention research",
            },
            {
                "action": "Ask for help",
                "prompt": "What's one thing someone else could take off your plate?",
                "context": "overwhelmed, exhausted",
                "evidence": "Social support, task delegation",
            },
            {
                "action": "Set a boundary",
                "prompt": "What boundary do you need to protect your energy?",
                "context": "boundary_violated, overextended",
                "evidence": "Boundary research, relationship psychology",
            },
            {
                "action": "Take a real break",
                "prompt": "When could you actually stop for 15 minutes today?",
                "context": "exhausted, overwhelmed",
                "evidence": "Recovery research, rest benefits",
            },
            {
                "action": "Name what's draining you",
                "prompt": "What's taking the most from you right now?",
                "context": "exhausted, overextended",
                "evidence": "Energy management, self-awareness",
            },
        ],
    },
    "recognition": {
        "label": "Recognition/Value",
        "icon": "✨",
        "strategies": [
            {
                "action": "Acknowledge yourself",
                "prompt": "What did you do well here, even if no one noticed?",
                "context": "unappreciated, overlooked, inadequate",
                "evidence": "Self-compassion research (Neff)",
            },
            {
                "action": "Ask for feedback",
                "prompt": "Who could you ask: 'How did you see that?'",
                "context": "unappreciated, inadequate",
                "evidence": "Feedback-seeking behavior research",
            },
            {
                "action": "Share your contribution",
                "prompt": "What's one thing you could make visible?",
                "context": "overlooked, unappreciated",
                "evidence": "Visibility in the workplace research",
            },
            {
                "action": "Separate their view from your worth",
                "prompt": "What do you know about your value, regardless of their reaction?",
                "context": "criticized, inadequate",
                "evidence": "Cognitive restructuring (CBT)",
            },
            {
                "action": "Find recognition elsewhere",
                "prompt": "Where else do people see your value?",
                "context": "unappreciated, overlooked",
                "evidence": "Multiple life domains, resilience research",
            },
        ],
    },
}
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def get_interventions(
    need_type: str, contexts: Optional[List[str]] = None, limit: int = 3
) -> List[Dict]:
    """
    Get contextually appropriate interventions for a detected need.

    FIXED: the ``contexts`` annotation was ``List[str]`` with a ``None``
    default; it is now ``Optional[List[str]]`` to match actual usage.

    Args:
        need_type: One of autonomy, connection, security, rest, recognition
        contexts: List of detected contexts (e.g., ["isolated", "rejected"])
        limit: Maximum number of interventions to return

    Returns:
        List of intervention dicts with action, prompt, context, evidence.
        Empty list for an unknown need_type.
    """
    if need_type not in INTERVENTIONS:
        return []

    strategies = INTERVENTIONS[need_type]["strategies"]

    # If contexts were provided, rank strategies by how many of the user's
    # contexts appear in each strategy's comma-separated "context" field.
    if contexts:
        user_contexts = set(contexts)
        scored = []
        for strategy in strategies:
            strategy_contexts = set(strategy["context"].split(", "))
            scored.append((len(strategy_contexts & user_contexts), strategy))

        # Highest overlap first; ties keep original order (sort is stable).
        scored.sort(key=lambda item: item[0], reverse=True)
        return [s for _, s in scored[:limit]]

    # No contexts - return the first N strategies as-is.
    return strategies[:limit]
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def should_show_interventions(
    confidence: float,
    message_count: int,
    emotional_intensity: float = None,
    min_messages: int = 2,  # Simplified: just wait one exchange
    min_confidence: float = 0.70,  # Simplified: lower bar
    min_arousal: float = 0.40,  # Simplified: catch more cases
) -> bool:
    """
    Decide whether intervention suggestions should be surfaced.

    SIMPLIFIED for demo: show interventions when the conversation has had
    at least one exchange, the detected need is confident enough, and (if
    known) the emotional arousal is not below the threshold.

    Args:
        confidence: Confidence in the detected need (0-1)
        message_count: Number of messages in conversation
        emotional_intensity: Arousal level (0-1); None means "unknown",
            which is treated as passing the arousal gate
        min_messages: Minimum messages (default: 2)
        min_confidence: Minimum confidence (default: 0.70)
        min_arousal: Minimum arousal (default: 0.40)

    Returns:
        True if interventions should be displayed
    """
    enough_history = message_count >= min_messages
    confident_enough = confidence >= min_confidence
    too_calm = emotional_intensity is not None and emotional_intensity < min_arousal
    return enough_history and confident_enough and not too_calm
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def format_interventions(
    need_type: str,
    interventions: List[Dict],
    confidence: float,
    emotion_arc: Dict = None,
    user_text: str = None,
) -> str:
    """
    Format interventions for display in the UI.
    Uses memory/emotion arc to personalize suggestions when available.

    Args:
        need_type: The detected need type
        interventions: List of intervention dicts
        confidence: Confidence in the need detection
        emotion_arc: Optional emotion trajectory from memory
        user_text: Optional current user message for context extraction

    Returns:
        Formatted markdown string with personalized interventions
    """
    # Nothing to render -> empty string so callers can append unconditionally.
    if not interventions:
        return ""

    # KeyError here if need_type is unknown - callers are expected to pass
    # a need that came from detection, not arbitrary strings.
    need_info = INTERVENTIONS[need_type]
    icon = need_info["icon"]
    label = need_info["label"]

    output = f"\n\n---\n\n"
    output += f"💡 **Based on your need for {icon} {label}:**\n\n"

    # Extract names/places from current text for personalization
    entities = _extract_entities(user_text) if user_text else {}

    # Check if there's a pattern in emotion arc
    pattern_insight = _analyze_emotion_pattern(emotion_arc) if emotion_arc else None

    # Personalization precedence per intervention: (1) trajectory pattern
    # insight (first item only), (2) a mentioned person for "reach out"
    # actions, (3) a mentioned place for connection needs, (4) the
    # strategy's own default prompt.
    for i, intervention in enumerate(interventions):
        # Personalize the prompt if we have context
        prompt = intervention["prompt"]

        # Add memory-based personalization
        if pattern_insight and i == 0:  # First intervention gets pattern insight
            output += f"• **{intervention['action']}** \n"
            output += f"  ↳ *{pattern_insight}*\n\n"
        elif entities.get("person") and "reach out" in intervention["action"].lower():
            # Personalize "reach out" with mentioned person
            person = entities["person"][0]
            output += f"• **{intervention['action']}** \n"
            output += f"  ↳ *Could {person} be someone to talk to about this?*\n\n"
        elif entities.get("place") and "connection" in need_type:
            # Reference places where user felt better
            # NOTE(review): _extract_entities never populates "place" in the
            # current implementation, so this branch appears dormant - confirm.
            place = entities["place"][0]
            output += f"• **{intervention['action']}** \n"
            output += f"  ↳ *What about {place} helped you feel more connected?*\n\n"
        else:
            # Use default prompt
            output += f"• **{intervention['action']}** \n"
            output += f"  ↳ *{prompt}*\n\n"

    output += "*These are just ideas — only you know what fits right now.*"

    return output
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def _extract_entities(text: str) -> Dict[str, List[str]]:
|
| 339 |
+
"""
|
| 340 |
+
Simple entity extraction - finds names, places mentioned.
|
| 341 |
+
(Could be enhanced with NER later, but regex works for now)
|
| 342 |
+
"""
|
| 343 |
+
import re
|
| 344 |
+
|
| 345 |
+
entities = {"person": [], "place": []}
|
| 346 |
+
|
| 347 |
+
if not text:
|
| 348 |
+
return entities
|
| 349 |
+
|
| 350 |
+
# Look for capitalized words that might be names
|
| 351 |
+
# Pattern: capitalized word not at start of sentence
|
| 352 |
+
words = text.split()
|
| 353 |
+
for i, word in enumerate(words):
|
| 354 |
+
# Skip first word and common words
|
| 355 |
+
if i == 0 or word.lower() in ["i", "the", "a", "an", "my", "her", "his"]:
|
| 356 |
+
continue
|
| 357 |
+
# Check if capitalized and looks like a name
|
| 358 |
+
if word[0].isupper() and len(word) > 2:
|
| 359 |
+
entities["person"].append(word.rstrip(".,!?"))
|
| 360 |
+
|
| 361 |
+
return entities
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def _analyze_emotion_pattern(emotion_arc: Dict) -> str:
|
| 365 |
+
"""
|
| 366 |
+
Analyze emotion trajectory for patterns and insights.
|
| 367 |
+
Returns a personalized prompt based on what we've seen.
|
| 368 |
+
"""
|
| 369 |
+
trajectory = emotion_arc.get("trajectory", [])
|
| 370 |
+
|
| 371 |
+
if not trajectory or len(trajectory) < 2:
|
| 372 |
+
return None
|
| 373 |
+
|
| 374 |
+
# Check if emotions are getting worse
|
| 375 |
+
recent_valence = [e.get("valence", 0) for e in trajectory[-3:]]
|
| 376 |
+
if len(recent_valence) >= 2:
|
| 377 |
+
if all(
|
| 378 |
+
recent_valence[i] < recent_valence[i - 1]
|
| 379 |
+
for i in range(1, len(recent_valence))
|
| 380 |
+
):
|
| 381 |
+
return "Your emotions have been getting heavier - what could help you shift this pattern?"
|
| 382 |
+
|
| 383 |
+
# Check if stuck in same emotion
|
| 384 |
+
recent_labels = [e.get("primary_label") for e in trajectory[-3:]]
|
| 385 |
+
if len(set(recent_labels)) == 1:
|
| 386 |
+
emotion = recent_labels[0]
|
| 387 |
+
if emotion in ["anxious", "sad"]:
|
| 388 |
+
return f"You've been feeling {emotion} for a while now - what's one thing that might give you a break from this?"
|
| 389 |
+
|
| 390 |
+
# Check if there was a better moment
|
| 391 |
+
best_valence = max(e.get("valence", -1) for e in trajectory)
|
| 392 |
+
if best_valence > 0:
|
| 393 |
+
best_moment = [e for e in trajectory if e.get("valence") == best_valence][0]
|
| 394 |
+
snippet = best_moment.get("text", "")[:50]
|
| 395 |
+
return f'Earlier when you said "{snippet}..." you seemed lighter - what was different then?'
|
| 396 |
+
|
| 397 |
+
return None
|
utils/mcp_client.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# utils/mcp_client.py
|
| 2 |
+
import asyncio
|
| 3 |
+
from typing import Dict, Any, List, Optional
|
| 4 |
+
from mcp import ClientSession, StdioServerParameters
|
| 5 |
+
from mcp.client.stdio import stdio_client
|
| 6 |
+
|
| 7 |
+
class MCPMux:
    """
    Multiplexer over one or more MCP servers.

    Keeps one ClientSession per connected server plus a flat tool-name
    index so callers can invoke any tool without knowing which server
    provides it.
    """

    def __init__(self):
        self._servers = {}     # server name -> {"session": ClientSession, "tools": {tool name: tool}}
        self._tool_index = {}  # tool name -> server name
        self._streams = {}     # server name -> stdio context manager (kept alive)

    async def connect_stdio(self, name: str, command: str, args: Optional[List[str]] = None, env: Optional[Dict[str, str]] = None):
        """
        Launch an MCP server over stdio and register its tools.

        Args:
            name: Logical name for the server (registry key).
            command: Executable to launch.
            args: Command-line arguments for the server process.
            env: Environment variables for the server process.

        Returns:
            The list of tools the server advertises.
        """
        params = StdioServerParameters(command=command, args=args or [], env=env or {})

        # Enter the stdio transport manually and keep the context manager
        # around: letting it go out of scope would close the pipes.
        stdio = stdio_client(params)
        read, write = await stdio.__aenter__()
        self._streams[name] = stdio

        # Open and initialize the MCP session over those pipes.
        session = ClientSession(read, write)
        await session.__aenter__()
        await session.initialize()

        # Discover and index the server's tools.
        tools_result = await session.list_tools()
        tools = tools_result.tools

        self._servers[name] = {"session": session, "tools": {t.name: t for t in tools}}
        for t in tools:
            self._tool_index[t.name] = name
        return tools

    async def call(self, tool_name: str, arguments: Dict[str, Any]) -> Any:
        """
        Invoke *tool_name* on whichever server registered it.

        Raises:
            ValueError: If no connected server provides the tool.

        Returns:
            The tool's text/data content joined with newlines, or
            "(no content)" when the response carried none.
        """
        if tool_name not in self._tool_index:
            raise ValueError(f"Unknown tool: {tool_name}")
        server_name = self._tool_index[tool_name]
        session = self._servers[server_name]["session"]

        print(f"🔧 MCP call: {server_name}.{tool_name}({arguments})")
        res = await session.call_tool(tool_name, arguments=arguments)
        print(f"✅ MCP response type: {type(res)}, hasattr content: {hasattr(res, 'content')}")

        # Flatten the mixed content items into a single string.
        parts = []
        for c in res.content:
            if hasattr(c, "text") and c.text:
                parts.append(c.text)
            elif hasattr(c, "data"):
                parts.append(str(c.data))

        result = "\n".join(parts) if parts else "(no content)"
        print(f"📤 MCP result: {result[:200]}...")
        return result

    async def list_all_tools(self):
        """Return {server name: [tool names]} for every connected server."""
        out = {}
        for name, s in self._servers.items():
            out[name] = list(s["tools"].keys())
        return out

    async def close(self):
        """
        Shut down all sessions and their stdio transports.

        BUG FIX: the previous implementation iterated self._servers a
        second time and awaited ``s["conn"].close()``, but "conn" was never
        stored anywhere - every call with connected servers raised
        KeyError, and the stdio transports saved in self._streams were
        never exited.
        """
        for s in self._servers.values():
            await s["session"].__aexit__(None, None, None)
        for stdio in self._streams.values():
            await stdio.__aexit__(None, None, None)
        self._servers.clear()
        self._tool_index.clear()
        self._streams.clear()
|
utils/memory.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# utils/memory.py
"""Tiny JSON-file persistence for the last few chat exchanges."""
import json
import os
import time

FILE = "memory.json"
MAX_ENTRIES = 5  # keep only the most recent exchanges on disk


def load():
    """Return the persisted history as a list.

    FIXED: reads with explicit UTF-8 and returns [] instead of crashing
    when the file is missing, unreadable, or contains invalid JSON
    (e.g. a truncated write).
    """
    if os.path.exists(FILE):
        try:
            with open(FILE, encoding="utf-8") as f:
                return json.load(f)
        except (json.JSONDecodeError, OSError):
            # A corrupt history file should not take the app down.
            return []
    return []


def save(history):
    """Persist only the most recent MAX_ENTRIES exchanges (UTF-8)."""
    with open(FILE, "w", encoding="utf-8") as f:
        json.dump(history[-MAX_ENTRIES:], f)


def remember(message, reply):
    """Append one (user, assistant) exchange and persist it.

    Returns the full in-memory history list (before on-disk truncation)
    so callers can inspect what was just stored.
    """
    data = load()
    data.append({"t": int(time.time()), "u": message, "a": reply})
    save(data)
    return data
|
utils/needs_lexicon.py
ADDED
|
@@ -0,0 +1,648 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Implicit Needs Detection for Ghost Malone
|
| 3 |
+
Maps emotional patterns + context keywords to underlying psychological needs.
|
| 4 |
+
|
| 5 |
+
STRATEGIC FRAMEWORK:
|
| 6 |
+
Based on Self-Determination Theory (Ryan & Deci) + Maslow's Hierarchy
|
| 7 |
+
|
| 8 |
+
5 Core Needs:
|
| 9 |
+
1. AUTONOMY - Control, choice, self-direction
|
| 10 |
+
2. CONNECTION - Belonging, relationships, being seen
|
| 11 |
+
3. SECURITY - Safety, clarity, predictability
|
| 12 |
+
4. REST - Recovery, boundaries, capacity
|
| 13 |
+
5. RECOGNITION - Validation, competence, mattering
|
| 14 |
+
|
| 15 |
+
Context Detection Strategy:
|
| 16 |
+
- BARRIERS: What's blocking the need? (can't, won't let, prevented)
|
| 17 |
+
- DEFICITS: What's missing? (nobody, nothing, no one)
|
| 18 |
+
- THREATS: What's at risk? (unsafe, unstable, unpredictable)
|
| 19 |
+
- OVERLOAD: What's too much? (overwhelmed, exhausted, too many)
|
| 20 |
+
- DISMISSAL: What's being ignored? (dismissed, invisible, unappreciated)
|
| 21 |
+
|
| 22 |
+
Constitutional AI Principle: Respects human dignity by treating emotions as signals
|
| 23 |
+
of legitimate needs, not surface-level problems.
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
from typing import List, Dict
|
| 27 |
+
import re
|
| 28 |
+
|
| 29 |
+
# STRATEGIC CONTEXT PATTERNS - organized by psychological dimension
|
| 30 |
+
CONTEXT_PATTERNS = {
    # Each value is a regex applied by detect_context() via re.search against
    # text.lower(); patterns are therefore written entirely in lowercase.
    # `.{0,15}` gaps allow a few characters/words between anchor phrases.
    # ===== AUTONOMY CONTEXTS =====
    # Barriers to self-direction
    "blocked": r"(can't|won't let|prevented|stopped|blocked|restricted|controlled|controlling me|forced|have to|must|don't give|won't give|not letting|not allowed|doesn't give|pressured to|feel pressured)",
    "powerless": r"(powerless|helpless|trapped|stuck|no choice|no control|out of.{0,15}control|feels.{0,15}out of control|don't have the tools|not much opportunity|no say)",
    "dismissed": r"(ignored|dismissed|disregarded|invalidated|didn't listen|brushed off|shut down|talked over|doesn't consider|don't consider|nobody trusts|don't trust|people ignore|ignore me|cut me off|looked.{0,15}through me|right through me)",
    "underutilized": r"(know i can|i can do|capable of|able to do|qualified|skills|talent|waste|potential|not enough challenge|bored|underused|so boring|below my ability|way below|could do.{0,15}more|living up to)",
    "micromanaged": r"(micromanag|hovering|checking|breathing down|second-guess|don't trust me|checks every|watching every)",
    # ===== CONNECTION CONTEXTS =====
    # Deficits in belonging/relationships
    "isolated": r"(alone|lonely|isolated|by myself|left out|excluded|without me|on my own|don't have anyone|no one to talk|invisible to|no one there|nobody there)",
    "rejected": r"(rejected|abandoned|left me|left behind|they left|ghosted|dumped|unwanted|unloved|won't talk|ignoring|avoiding me|cut off|don't.{0,15}respond|doesn't.{0,15}respond|won't.{0,15}respond|not.{0,15}respond(?:ed|ing)?|hasn't.{0,15}respond|didn't.{0,15}respond|not.{0,15}reply|not.{0,15}repl(?:ied|ying)|not.{0,15}answer(?:ed|ing)?|avoid me|seem to avoid|wasn't invited|didn't invite|unanswered|go unanswered|make excuses|not texting.{0,15}back|won't text)",
    "misunderstood": r"(misunderstood|don't get it|don't understand|not listening|not hearing|missing the point|doesn't.{0,15}listen|won't.{0,15}listen|different language|speaking a different|doesn't.{0,15}hear|misinterpret|nobody.{0,15}understands|nobody.{0,15}knows|nobody.{0,15}interested|doesn't.{0,15}share|doesn't.{0,15}talk|won't.{0,15}talk|doesn't.{0,15}open up)",
    "singled_out": r"(everyone else|everybody else|talks to everyone|except me|only me|just me|do things without|left behind|don't invite|didn't invite|don't call me|leaves me out|not included|don't pass|doesn't pass|won't pass|outsider|feel like.{0,15}outsider|make plans without|plans without including|always the one reaching out|don't include)",
    "disconnected": r"(disconnected|distant|drift|growing apart|not close|lost touch|pulling away|pull away|hard to.{0,15}connection|hard to connect|can't connect)",
    # ===== SECURITY CONTEXTS =====
    # Threats to safety/clarity
    "uncertain": r"(don't know|uncertain|unsure|unclear|confused|no idea|what if|can't tell|ambiguous|what to do|where do i go|what next|what do i do|can't predict|unpredictable|what's going to happen|what will happen|going to happen)",
    "unstable": r"(unstable|unpredictable|chaotic|all over|everything's changing|up in the air|falling apart|like always|never on time|unreliable|keeps changing|constantly changing|shifts again|everything shifts|stays stable|nothing.{0,15}stable|shifting ground|constantly shifting)",
    "threatened": r"(threatened|unsafe|danger|risk|scared|scary|vulnerable|exposed|at risk|pain again|go through that|rot in hell|worried|worrying|something bad|threats|not protected|not secure|don't feel.{0,15}secure)",
    "loss": r"(losing|lost|losing grip|slipping away|falling apart|coming undone)",
    # ===== REST CONTEXTS =====
    # Overload on capacity
    "overwhelmed": r"(overwhelmed|too much|can't handle|drowning|swamped|buried|crowded|crowd|too many people|overstimulat|too much work|coming at me|at me all at once)",
    "exhausted": r"(exhausted|drained|burnt out|can't anymore|running on empty|wiped|spent|depleted|freaking tired|so tired|running on fumes|on fumes|no energy|don't have.{0,15}energy)",
    "overextended": r"(overextended|overstretched|spread thin|doing too much|no time|juggling|can't keep up|keep up with)",
    "boundary_violated": r"(always asking|never stops|can't say no|taking advantage|demanding|intrusive|available 24|expect.{0,15}available|need space|won't give.{0,15}space|boundaries.{0,15}ignored|kept.{0,15}insisting|kept.{0,15}texting|kept.{0,15}calling|i said no|asked for.{0,15}boundaries)",
    # ===== RECOGNITION CONTEXTS =====
    # Dismissal of value/competence
    "unappreciated": r"(unappreciated|taken for granted|not valued|not recognized|invisible|thankless|nobody appreciates|no one appreciates|zero recognition|get.{0,15}recognition|work.{0,15}hard|not appreciated|don't feel appreciated|no.{0,15}recognition|no sense of recognition)",
    "inadequate": r"(not good enough|inadequate|failing|not enough|disappointing|falling short|incompetent|doubt.{0,15}have what it takes|don't feel capable|never enough|nothing.{0,15}enough)",
    "overlooked": r"(overlooked|passed over|ignored|not noticed|nobody notices|no one notices|credit|someone else|didn't acknowledge)",
    "criticized": r"(criticized|judged|attacked|picked apart|nothing right|always wrong|nitpick|point out.{0,15}mistake|only.{0,15}mistake|compared.{0,15}unfavorably|compared to others|offended|offensive|rude|disrespected)",
}
|
| 64 |
+
|
| 65 |
+
# Needs taxonomy (based on Maslow + Self-Determination Theory)
|
| 66 |
+
NEEDS = {
    # Display metadata and generic interventions for each need.
    # Keys must match the "need" field of entries in NEED_RULES, since
    # infer_needs() looks up NEEDS[rule["need"]] directly.
    "autonomy": {
        "label": "Autonomy/Control",
        "icon": "⚖️",
        "description": "Need for self-direction and agency over one's life",
        "interventions": [
            "Set a boundary",
            "Say 'no'",
            "Take back decision-making power",
        ],
    },
    "connection": {
        "label": "Connection/Belonging",
        "icon": "🗣️",
        "description": "Need for meaningful relationships and social bonds",
        "interventions": [
            "Reach out to someone you trust",
            "Share what you're feeling",
            "Ask for support",
        ],
    },
    "security": {
        "label": "Security/Clarity",
        "icon": "🛡️",
        "description": "Need for safety, predictability, and understanding",
        "interventions": [
            "Get more information",
            "Make a plan",
            "Identify what you can control",
        ],
    },
    "rest": {
        "label": "Rest/Boundaries",
        "icon": "🛌",
        "description": "Need for recuperation and limits on demands",
        "interventions": [
            "Take a break",
            "Delegate something",
            "Give yourself permission to rest",
        ],
    },
    "recognition": {
        "label": "Recognition/Validation",
        "icon": "✨",
        "description": "Need to be seen, valued, and acknowledged",
        "interventions": [
            "Acknowledge your own effort",
            "Ask for feedback",
            "Celebrate small wins",
        ],
    },
}
|
| 118 |
+
|
| 119 |
+
# STRATEGIC NEED INFERENCE RULES
|
| 120 |
+
# Organized by primary need, with emotion+context combinations
|
| 121 |
+
# Using ONLY the 7 actual detected emotions: happy, sad, angry, anxious, tired, love, fear
|
| 122 |
+
NEED_RULES = [
    # A rule fires when ANY of its emotions co-occurs with ANY of its contexts
    # (see infer_needs); only the FIRST matching rule per need contributes,
    # so rule order encodes priority within each need.
    #
    # FIX: context names here must be keys of CONTEXT_PATTERNS — detect_context
    # only ever reports those keys. Removed dead names that could never match
    # ("trapped", "stuck", "abandoned", "unsafe", "vulnerable", "crowded")
    # and an exact-duplicate boundary_violated→rest rule that was shadowed by
    # the earlier 0.95 rule. No behavioral change.
    # ========== AUTONOMY RULES ==========
    # Primary emotion: Anger (blocked goals/control)
    {"emotions": ["angry"], "contexts": ["blocked", "powerless", "dismissed", "micromanaged"],
     "need": "autonomy", "confidence_base": 0.80,
     "reasoning": "Anger signals blocked goals or loss of control"},
    {"emotions": ["angry", "sad"], "contexts": ["powerless"],
     "need": "autonomy", "confidence_base": 0.85,
     "reasoning": "Anger + sadness at powerlessness indicates autonomy deficit"},
    {"emotions": ["angry"], "contexts": ["underutilized", "dismissed"],
     "need": "autonomy", "confidence_base": 0.75,
     "reasoning": "Anger at not being allowed to use your capabilities"},
    # HIGH PRIORITY: Underutilized should map to autonomy strongly
    {"emotions": ["sad", "angry", "anxious", "tired"], "contexts": ["underutilized"],
     "need": "autonomy", "confidence_base": 0.88,
     "reasoning": "Skills/potential being wasted is autonomy issue"},
    {"emotions": ["sad"], "contexts": ["powerless"],
     "need": "autonomy", "confidence_base": 0.70,
     "reasoning": "Sadness from lack of control"},
    {"emotions": ["sad"], "contexts": ["blocked", "micromanaged", "dismissed"],
     "need": "autonomy", "confidence_base": 0.75,
     "reasoning": "Sadness from being controlled or blocked"},
    {"emotions": ["tired"], "contexts": ["blocked", "powerless", "micromanaged"],
     "need": "autonomy", "confidence_base": 0.65,
     "reasoning": "Fatigue from lack of autonomy"},
    {"emotions": ["anxious"], "contexts": ["powerless", "blocked"],
     "need": "autonomy", "confidence_base": 0.70,
     "reasoning": "Anxiety from lack of control"},
    # ========== CONNECTION RULES ==========
    # Primary emotion: Sadness (relationship deficits)
    # HIGH PRIORITY: "Invisible to people" is connection, not recognition
    {"emotions": ["sad", "angry", "anxious"], "contexts": ["isolated"],
     "need": "connection", "confidence_base": 0.92,
     "reasoning": "Feeling invisible or isolated from people indicates connection deficit"},
    {"emotions": ["sad"], "contexts": ["isolated", "rejected", "misunderstood", "disconnected"],
     "need": "connection", "confidence_base": 0.85,
     "reasoning": "Sadness with isolation indicates connection needs"},
    {"emotions": ["sad"], "contexts": ["singled_out"],
     "need": "connection", "confidence_base": 0.90,
     "reasoning": "Being excluded from the group"},
    # Secondary emotion: Anger (rejection/exclusion)
    {"emotions": ["angry"], "contexts": ["rejected", "singled_out", "dismissed"],
     "need": "connection", "confidence_base": 0.75,
     "reasoning": "Anger from rejection signals need for belonging"},
    {"emotions": ["angry"], "contexts": ["misunderstood", "disconnected"],
     "need": "connection", "confidence_base": 0.75,
     "reasoning": "Anger from being misunderstood or disconnected"},
    {"emotions": ["angry"], "contexts": ["isolated"],
     "need": "connection", "confidence_base": 0.70,
     "reasoning": "Anger at being isolated"},
    {"emotions": ["fear"], "contexts": ["rejected"],
     "need": "connection", "confidence_base": 0.75,
     "reasoning": "Fear of abandonment signals connection needs"},
    {"emotions": ["anxious"], "contexts": ["isolated", "rejected", "misunderstood"],
     "need": "connection", "confidence_base": 0.70,
     "reasoning": "Anxiety from social isolation or rejection"},
    {"emotions": ["tired"], "contexts": ["isolated", "disconnected"],
     "need": "connection", "confidence_base": 0.60,
     "reasoning": "Fatigue from lack of social connection"},
    # ========== SECURITY RULES ==========
    # Primary emotions: Anxiety/Fear (threat/uncertainty)
    {"emotions": ["anxious"], "contexts": ["uncertain", "unstable", "threatened", "loss"],
     "need": "security", "confidence_base": 0.85,
     "reasoning": "Anxiety with uncertainty signals need for clarity/safety"},
    {"emotions": ["fear"], "contexts": ["threatened"],
     "need": "security", "confidence_base": 0.90,
     "reasoning": "Fear indicates immediate security/safety needs"},
    {"emotions": ["anxious"], "contexts": ["uncertain", "unstable"],
     "need": "security", "confidence_base": 0.80,
     "reasoning": "Worry about unpredictability or lack of clarity"},
    # HIGH PRIORITY: "Can't predict" with blocked is still security (uncertainty primary)
    {"emotions": ["anxious", "fear", "sad"], "contexts": ["uncertain"],
     "need": "security", "confidence_base": 0.94,
     "reasoning": "Uncertainty and unpredictability are core security concerns"},
    # HIGH PRIORITY: "Things keep changing" with overextended is security (instability primary)
    {"emotions": ["anxious", "tired", "sad"], "contexts": ["unstable"],
     "need": "security", "confidence_base": 0.94,
     "reasoning": "Constant change and instability are security issues"},
    {"emotions": ["sad", "anxious"], "contexts": ["loss"],
     "need": "security", "confidence_base": 0.75,
     "reasoning": "Grief + anxiety about losing stability"},
    {"emotions": ["angry"], "contexts": ["uncertain"],
     "need": "security", "confidence_base": 0.65,
     "reasoning": "Anger from lack of clarity"},
    {"emotions": ["sad"], "contexts": ["uncertain", "unstable", "loss"],
     "need": "security", "confidence_base": 0.75,
     "reasoning": "Sadness from instability or uncertainty"},
    {"emotions": ["tired"], "contexts": ["unstable", "threatened"],
     "need": "security", "confidence_base": 0.65,
     "reasoning": "Exhaustion from constant instability"},
    # ========== REST RULES ==========
    # Primary emotion: Tired (capacity overload)
    # HIGH PRIORITY: "Can't say no", "need space", "boundaries ignored" → rest (not autonomy)
    {"emotions": ["sad", "angry", "anxious", "tired"], "contexts": ["boundary_violated"],
     "need": "rest", "confidence_base": 0.95,
     "reasoning": "Boundary violations primarily indicate need for rest/space, not control"},
    {"emotions": ["tired"], "contexts": ["overwhelmed", "exhausted", "overextended"],
     "need": "rest", "confidence_base": 0.85,
     "reasoning": "Exhaustion + overwhelm indicates need for rest/boundaries"},
    {"emotions": ["tired"], "contexts": ["exhausted"],
     "need": "rest", "confidence_base": 0.95,
     "reasoning": "Explicit exhaustion is clearest rest signal"},
    {"emotions": ["angry"], "contexts": ["exhausted", "overwhelmed"],
     "need": "rest", "confidence_base": 0.75,
     "reasoning": "Anger from being exhausted or overwhelmed"},
    {"emotions": ["anxious", "tired"], "contexts": ["overwhelmed", "overextended"],
     "need": "rest", "confidence_base": 0.80,
     "reasoning": "Anxiety + tiredness suggests overextension"},
    {"emotions": ["anxious"], "contexts": ["overwhelmed"],
     "need": "rest", "confidence_base": 0.70,
     "reasoning": "Sensory/social overwhelm signals need for space"},
    {"emotions": ["angry", "tired"], "contexts": ["boundary_violated", "overextended"],
     "need": "rest", "confidence_base": 0.75,
     "reasoning": "Anger and fatigue from violated boundaries"},
    {"emotions": ["sad"], "contexts": ["overwhelmed", "exhausted", "overextended"],
     "need": "rest", "confidence_base": 0.70,
     "reasoning": "Sadness from depletion and overwhelm"},
    {"emotions": ["fear"], "contexts": ["overwhelmed", "boundary_violated"],
     "need": "rest", "confidence_base": 0.65,
     "reasoning": "Fear from being overwhelmed or boundaries violated"},
    # ========== RECOGNITION RULES ==========
    # Primary emotions: Sad/Angry (value not seen)
    # HIGH PRIORITY: "Nobody notices/appreciates" is recognition, not connection
    {"emotions": ["sad", "angry", "anxious", "tired"], "contexts": ["overlooked", "unappreciated"],
     "need": "recognition", "confidence_base": 0.90,
     "reasoning": "Being overlooked or unappreciated is primarily about recognition/validation"},
    # HIGH PRIORITY: "Don't feel capable" is recognition (inadequate), not autonomy
    {"emotions": ["sad", "anxious", "fear"], "contexts": ["inadequate"],
     "need": "recognition", "confidence_base": 0.92,
     "reasoning": "Self-doubt about capability is core recognition/validation issue"},
    {"emotions": ["sad"], "contexts": ["unappreciated"],
     "need": "recognition", "confidence_base": 0.87,
     "reasoning": "Sadness from not being appreciated or valued"},
    {"emotions": ["sad", "angry"], "contexts": ["unappreciated", "inadequate", "overlooked"],
     "need": "recognition", "confidence_base": 0.75,
     "reasoning": "Feeling unseen or inadequate signals need for validation"},
    {"emotions": ["angry"], "contexts": ["underutilized", "unappreciated", "overlooked"],
     "need": "recognition", "confidence_base": 0.80,
     "reasoning": "Your capabilities aren't being seen or valued"},
    {"emotions": ["sad"], "contexts": ["inadequate"],
     "need": "recognition", "confidence_base": 0.85,
     "reasoning": "Self-doubt about competence"},
    {"emotions": ["sad"], "contexts": ["criticized"],
     "need": "recognition", "confidence_base": 0.70,
     "reasoning": "Criticism without acknowledgment damages sense of value"},
    {"emotions": ["angry"], "contexts": ["overlooked"],
     "need": "recognition", "confidence_base": 0.75,
     "reasoning": "Anger at not being seen or credited"},
    {"emotions": ["angry"], "contexts": ["criticized"],
     "need": "recognition", "confidence_base": 0.75,
     "reasoning": "Anger at being criticized, offended, or disrespected"},
    {"emotions": ["anxious"], "contexts": ["inadequate", "criticized"],
     "need": "recognition", "confidence_base": 0.75,
     "reasoning": "Anxiety from self-doubt or criticism"},
    {"emotions": ["tired"], "contexts": ["unappreciated", "overlooked"],
     "need": "recognition", "confidence_base": 0.65,
     "reasoning": "Fatigue from lack of appreciation"},
    {"emotions": ["fear"], "contexts": ["inadequate", "criticized"],
     "need": "recognition", "confidence_base": 0.70,
     "reasoning": "Fear of failure or judgment"},
]
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
def detect_context(text: str) -> Dict[str, bool]:
    """Report which situational contexts appear in *text*.

    Lowercases the text once, then probes every regex in CONTEXT_PATTERNS.
    Returns a mapping of context name -> True/False for all patterns.
    """
    lowered = text.lower()
    return {
        name: re.search(pattern, lowered) is not None
        for name, pattern in CONTEXT_PATTERNS.items()
    }
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
def infer_needs(
    emotion_labels: List[str], text: str, valence: float = 0.0, arousal: float = 0.5
) -> List[Dict]:
    """
    Infer psychological needs from detected emotions plus message context.

    Args:
        emotion_labels: Primary emotions detected (e.g., ["angry", "sad"]).
        text: User's message text, scanned for situational contexts.
        valence: Emotional valence (-1 to 1).
        arousal: Emotional arousal (0 to 1).

    Returns:
        Needs sorted by descending confidence. Each entry contains the need
        key, label, icon, confidence, the matching rule's reasoning, the
        need's description and interventions, and the specific emotions and
        contexts that triggered the match.
    """
    # Which situational contexts are present in the message?
    active_contexts = [name for name, hit in detect_context(text).items() if hit]

    # Intensity bump is identical for every rule, so compute it once up front.
    intensity = (abs(valence) + arousal) / 2.0
    intensity_bonus = 0.05 if intensity > 0.6 else 0.0

    results: List[Dict] = []
    covered = set()

    for rule in NEED_RULES:
        need_key = rule["need"]
        # Only the first matching rule per need contributes.
        if need_key in covered:
            continue

        matched_emotions = [e for e in rule["emotions"] if e in emotion_labels]
        matched_contexts = [c for c in rule["contexts"] if c in active_contexts]
        if not matched_emotions or not matched_contexts:
            continue

        covered.add(need_key)

        # Base confidence from the rule, boosted for multi-emotion and
        # multi-context matches plus emotional intensity; capped at 1.0.
        score = rule["confidence_base"]
        if len(matched_emotions) > 1:
            score += 0.05
        if len(matched_contexts) > 1:
            score += 0.05
        score = min(1.0, score + intensity_bonus)

        info = NEEDS[need_key]
        results.append(
            {
                "need": need_key,
                "label": info["label"],
                "icon": info["icon"],
                "confidence": round(score, 2),
                "reasoning": rule["reasoning"],
                "description": info["description"],
                "interventions": info["interventions"],
                "matched_contexts": matched_contexts,
                "matched_emotions": matched_emotions,
            }
        )

    # Strongest signal first.
    results.sort(key=lambda entry: entry["confidence"], reverse=True)
    return results
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
def format_needs_summary(needs: List[Dict], max_display: int = 2) -> str:
    """
    Render inferred needs as a compact one-line summary.

    Args:
        needs: Output of infer_needs() (sorted, each with icon/label/confidence).
        max_display: How many of the top needs to include.

    Returns:
        A string like
        "⚖️ Autonomy/Control (85%) | 🗣️ Connection/Belonging (72%)",
        or a placeholder message when no needs were detected.
    """
    if not needs:
        return "No specific needs detected"

    pieces = []
    for entry in needs[:max_display]:
        percent = int(entry["confidence"] * 100)
        pieces.append(f"{entry['icon']} {entry['label']} ({percent}%)")
    return " | ".join(pieces)
|
| 611 |
+
|
| 612 |
+
|
| 613 |
+
# Example usage for testing
|
| 614 |
+
if __name__ == "__main__":
    # Smoke-test the pipeline with one sample message per expected need.
    demo_cases = [
        ("Autonomy", ["angry"], "my boss won't let me make my own decisions", -0.5, 0.7),
        ("Connection", ["sad"], "i feel so alone, nobody understands what i'm going through", -0.6, 0.4),
        ("Rest", ["tired", "anxious"], "i'm so overwhelmed, there's just too much to handle", -0.4, 0.6),
    ]
    for idx, (label, emotions, message, val, aro) in enumerate(demo_cases, start=1):
        detected = infer_needs(
            emotion_labels=emotions, text=message, valence=val, arousal=aro
        )
        print(f"Test {idx} ({label}):")
        print(format_needs_summary(detected))
        print(detected[0] if detected else "No needs detected")
        # Original output has no trailing blank line after the last case.
        if idx != len(demo_cases):
            print()
|
utils/orchestrator.py
ADDED
|
@@ -0,0 +1,282 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# utils/orchestrator.py
|
| 2 |
+
# Coordinates emotion, memory, and reflection servers together
|
| 3 |
+
from typing import Dict, Any, Optional
|
| 4 |
+
import asyncio
|
| 5 |
+
import time
|
| 6 |
+
from utils.mcp_client import MCPMux
|
| 7 |
+
from utils.needs_lexicon import infer_needs
|
| 8 |
+
from utils.intervention_lexicon import (
|
| 9 |
+
get_interventions,
|
| 10 |
+
should_show_interventions,
|
| 11 |
+
format_interventions,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class GhostMaloneMux:
|
| 16 |
+
"""Orchestrator for Ghost Malone's three servers: emotion, memory, reflection"""
|
| 17 |
+
|
| 18 |
+
def __init__(self):
    # Multiplexer that holds the stdio connections to the MCP servers.
    self.mux = MCPMux()
    # Lazy-connection flag: initialize() checks this and is a no-op once set.
    self.initialized = False
|
| 21 |
+
|
| 22 |
+
async def initialize(self):
    """Connect to the emotion, memory, and reflection MCP servers.

    Idempotent: returns immediately if the connections were already made.
    Each server is spawned as a stdio Python subprocess via the mux.
    """
    if self.initialized:
        return

    server_scripts = (
        ("emotion", "servers/emotion_server.py"),
        ("memory", "servers/memory_server.py"),
        ("reflection", "servers/reflection_server.py"),
    )
    for server_name, script_path in server_scripts:
        await self.mux.connect_stdio(server_name, "python", args=[script_path])

    self.initialized = True
|
| 43 |
+
|
| 44 |
+
async def process_message(
|
| 45 |
+
self,
|
| 46 |
+
user_text: str,
|
| 47 |
+
conversation_context: list[dict] | None = None,
|
| 48 |
+
user_id: str = "user_001",
|
| 49 |
+
intervention_thresholds: dict | None = None,
|
| 50 |
+
) -> dict:
|
| 51 |
+
"""
|
| 52 |
+
Process a user message through the full pipeline:
|
| 53 |
+
1. Emotion analysis
|
| 54 |
+
2. Needs inference
|
| 55 |
+
3. Memory operations (parallel)
|
| 56 |
+
4. Reflection generation
|
| 57 |
+
5. Intervention generation (if thresholds met)
|
| 58 |
+
|
| 59 |
+
Args:
|
| 60 |
+
user_text: The user's message
|
| 61 |
+
conversation_context: Previous messages
|
| 62 |
+
user_id: User identifier
|
| 63 |
+
intervention_thresholds: Optional dict with min_messages, min_confidence, min_arousal
|
| 64 |
+
|
| 65 |
+
Returns dict with:
|
| 66 |
+
"user_text": str,
|
| 67 |
+
"emotion": dict (emotion analysis),
|
| 68 |
+
"inferred_needs": list (psychological needs detected),
|
| 69 |
+
"emotion_arc": dict (trajectory),
|
| 70 |
+
"response": str (assistant reply),
|
| 71 |
+
"tone": str
|
| 72 |
+
}
|
| 73 |
+
"""
|
| 74 |
+
if not self.initialized:
|
| 75 |
+
await self.initialize()
|
| 76 |
+
|
| 77 |
+
# Extract intervention thresholds (SIMPLIFIED defaults for demo)
|
| 78 |
+
thresholds = intervention_thresholds or {}
|
| 79 |
+
min_messages = thresholds.get("min_messages", 2) # Just need one exchange
|
| 80 |
+
min_confidence = thresholds.get("min_confidence", 0.70) # Lower bar
|
| 81 |
+
min_arousal = thresholds.get("min_arousal", 0.40) # Catch more cases
|
| 82 |
+
|
| 83 |
+
start_time = time.time()
|
| 84 |
+
|
| 85 |
+
# Initialize toolbox log
|
| 86 |
+
toolbox_log = []
|
| 87 |
+
|
| 88 |
+
# Step 1: Analyze emotion
|
| 89 |
+
t1 = time.time()
|
| 90 |
+
toolbox_log.append(
|
| 91 |
+
"🔧 **emotion_server.analyze()** - Detecting emotions from text"
|
| 92 |
+
)
|
| 93 |
+
emotion_data = await self.mux.call(
|
| 94 |
+
"analyze", {"text": user_text, "user_id": user_id}
|
| 95 |
+
)
|
| 96 |
+
elapsed = (time.time() - t1) * 1000
|
| 97 |
+
print(f"⏱️ Emotion analysis: {elapsed:.0f}ms")
|
| 98 |
+
|
| 99 |
+
# Parse emotion response (it comes as JSON string from MCP)
|
| 100 |
+
if isinstance(emotion_data, str):
|
| 101 |
+
import json
|
| 102 |
+
|
| 103 |
+
emotion_dict = json.loads(emotion_data)
|
| 104 |
+
else:
|
| 105 |
+
emotion_dict = emotion_data
|
| 106 |
+
|
| 107 |
+
toolbox_log.append(
|
| 108 |
+
f" ✅ Found: {emotion_dict.get('labels', [])} ({elapsed:.0f}ms)"
|
| 109 |
+
)
|
| 110 |
+
|
| 111 |
+
# Step 2: Infer psychological needs from emotion + context (fast, <100ms)
|
| 112 |
+
t2 = time.time()
|
| 113 |
+
toolbox_log.append(
|
| 114 |
+
"\n📖 **needs_lexicon.infer_needs()** - Detecting psychological needs"
|
| 115 |
+
)
|
| 116 |
+
emotion_labels = emotion_dict.get("labels", [])
|
| 117 |
+
valence = emotion_dict.get("valence", 0.0)
|
| 118 |
+
arousal = emotion_dict.get("arousal", 0.0)
|
| 119 |
+
inferred_needs = infer_needs(emotion_labels, user_text, valence, arousal)
|
| 120 |
+
elapsed = (time.time() - t2) * 1000
|
| 121 |
+
print(f"⏱️ Needs inference: {elapsed:.0f}ms")
|
| 122 |
+
|
| 123 |
+
if inferred_needs:
|
| 124 |
+
need_summary = ", ".join(
|
| 125 |
+
[f"{n['label']} ({n['confidence']:.0%})" for n in inferred_needs]
|
| 126 |
+
)
|
| 127 |
+
toolbox_log.append(f" ✅ Detected: {need_summary} ({elapsed:.0f}ms)")
|
| 128 |
+
else:
|
| 129 |
+
toolbox_log.append(f" ℹ️ No strong needs detected ({elapsed:.0f}ms)")
|
| 130 |
+
|
| 131 |
+
# Debug: Print detected needs
|
| 132 |
+
print(f"🎯 Inferred needs: {inferred_needs}")
|
| 133 |
+
if inferred_needs:
|
| 134 |
+
for need in inferred_needs:
|
| 135 |
+
print(f" - {need['icon']} {need['label']} ({need['confidence']:.0%})")
|
| 136 |
+
|
| 137 |
+
# Step 3 & 4: Parallelize memory operations (they don't depend on each other)
|
| 138 |
+
t3 = time.time()
|
| 139 |
+
toolbox_log.append("\n🧠 **memory_server** - Parallel operations:")
|
| 140 |
+
toolbox_log.append(" 📥 get_emotion_arc() - Retrieving emotion history")
|
| 141 |
+
toolbox_log.append(" 💾 remember_event() - Storing current message")
|
| 142 |
+
|
| 143 |
+
emotion_arc_task = self.mux.call("get_emotion_arc", {"k": 10})
|
| 144 |
+
|
| 145 |
+
event = {
|
| 146 |
+
"text": user_text,
|
| 147 |
+
"emotion": emotion_dict,
|
| 148 |
+
"role": "user",
|
| 149 |
+
"user_id": user_id,
|
| 150 |
+
}
|
| 151 |
+
remember_task = self.mux.call("remember_event", {"event": event})
|
| 152 |
+
|
| 153 |
+
# Run both memory operations in parallel
|
| 154 |
+
emotion_arc, _ = await asyncio.gather(emotion_arc_task, remember_task)
|
| 155 |
+
elapsed = (time.time() - t3) * 1000
|
| 156 |
+
print(f"⏱️ Memory operations (parallel): {elapsed:.0f}ms")
|
| 157 |
+
toolbox_log.append(f" ✅ Completed ({elapsed:.0f}ms)")
|
| 158 |
+
|
| 159 |
+
if isinstance(emotion_arc, str):
|
| 160 |
+
import json
|
| 161 |
+
|
| 162 |
+
emotion_arc = json.loads(emotion_arc)
|
| 163 |
+
|
| 164 |
+
# Step 5: Generate reflection using emotion + arc
|
| 165 |
+
t4 = time.time()
|
| 166 |
+
toolbox_log.append(
|
| 167 |
+
"\n🤖 **reflection_server.generate()** - Claude response generation"
|
| 168 |
+
)
|
| 169 |
+
tone = emotion_dict.get("tone", "neutral")
|
| 170 |
+
reflection_response = await self.mux.call(
|
| 171 |
+
"generate",
|
| 172 |
+
{
|
| 173 |
+
"text": user_text,
|
| 174 |
+
"context": conversation_context,
|
| 175 |
+
"tone": tone,
|
| 176 |
+
"emotion_arc": emotion_arc,
|
| 177 |
+
},
|
| 178 |
+
)
|
| 179 |
+
elapsed = (time.time() - t4) * 1000
|
| 180 |
+
print(f"⏱️ Reflection generation (Claude): {elapsed:.0f}ms")
|
| 181 |
+
toolbox_log.append(f" ✅ Response generated ({elapsed:.0f}ms)")
|
| 182 |
+
|
| 183 |
+
if isinstance(reflection_response, str):
|
| 184 |
+
import json
|
| 185 |
+
|
| 186 |
+
try:
|
| 187 |
+
response_dict = json.loads(reflection_response)
|
| 188 |
+
reply = response_dict.get("reply", reflection_response)
|
| 189 |
+
except Exception:
|
| 190 |
+
reply = reflection_response
|
| 191 |
+
else:
|
| 192 |
+
reply = str(reflection_response)
|
| 193 |
+
|
| 194 |
+
# Step 6: Generate interventions (SIMPLIFIED FOR DEMO)
|
| 195 |
+
toolbox_log.append(
|
| 196 |
+
"\n💡 **intervention_lexicon** - Checking intervention criteria"
|
| 197 |
+
)
|
| 198 |
+
message_count = len(conversation_context) + 1 if conversation_context else 1
|
| 199 |
+
arousal = emotion_dict.get("arousal", 0.5)
|
| 200 |
+
|
| 201 |
+
intervention_text = ""
|
| 202 |
+
|
| 203 |
+
# Simple rule: If we detected a need, check if we should show interventions
|
| 204 |
+
if inferred_needs and should_show_interventions(
|
| 205 |
+
confidence=inferred_needs[0]["confidence"],
|
| 206 |
+
message_count=message_count,
|
| 207 |
+
emotional_intensity=arousal,
|
| 208 |
+
min_messages=min_messages,
|
| 209 |
+
min_confidence=min_confidence,
|
| 210 |
+
min_arousal=min_arousal,
|
| 211 |
+
):
|
| 212 |
+
toolbox_log.append(
|
| 213 |
+
f" ✅ Thresholds met (msg≥{min_messages}, conf≥{min_confidence:.0%}, arousal≥{min_arousal:.1f})"
|
| 214 |
+
)
|
| 215 |
+
need_type = inferred_needs[0]["need"]
|
| 216 |
+
contexts = inferred_needs[0].get("matched_contexts", [])
|
| 217 |
+
interventions = get_interventions(need_type, contexts, limit=3)
|
| 218 |
+
|
| 219 |
+
if interventions:
|
| 220 |
+
toolbox_log.append(
|
| 221 |
+
f" 📋 get_interventions() - Retrieved {len(interventions)} strategies for {need_type}"
|
| 222 |
+
)
|
| 223 |
+
intervention_text = format_interventions(
|
| 224 |
+
need_type,
|
| 225 |
+
interventions,
|
| 226 |
+
inferred_needs[0]["confidence"],
|
| 227 |
+
emotion_arc=emotion_arc,
|
| 228 |
+
user_text=user_text,
|
| 229 |
+
)
|
| 230 |
+
print(f"💡 Showing {len(interventions)} interventions for {need_type}")
|
| 231 |
+
else:
|
| 232 |
+
reasons = []
|
| 233 |
+
if not inferred_needs:
|
| 234 |
+
reasons.append("no needs detected")
|
| 235 |
+
else:
|
| 236 |
+
if message_count < min_messages:
|
| 237 |
+
reasons.append(f"msg count {message_count}<{min_messages}")
|
| 238 |
+
if inferred_needs[0]["confidence"] < min_confidence:
|
| 239 |
+
reasons.append(
|
| 240 |
+
f"confidence {inferred_needs[0]['confidence']:.0%}<{min_confidence:.0%}"
|
| 241 |
+
)
|
| 242 |
+
if arousal < min_arousal:
|
| 243 |
+
reasons.append(f"arousal {arousal:.1f}<{min_arousal:.1f}")
|
| 244 |
+
toolbox_log.append(f" ℹ️ No interventions ({', '.join(reasons)})")
|
| 245 |
+
|
| 246 |
+
# Combine empathetic response + interventions
|
| 247 |
+
full_response = reply + intervention_text
|
| 248 |
+
|
| 249 |
+
total_time = time.time() - start_time
|
| 250 |
+
print(f"⏱️ TOTAL pipeline time: {total_time*1000:.0f}ms ({total_time:.1f}s)")
|
| 251 |
+
toolbox_log.append(f"\n⏱️ **Total pipeline:** {total_time*1000:.0f}ms")
|
| 252 |
+
|
| 253 |
+
# Format toolbox log
|
| 254 |
+
toolbox_log_str = "🧰 **Toolbox Activity:**\n\n" + "\n".join(toolbox_log)
|
| 255 |
+
|
| 256 |
+
return {
|
| 257 |
+
"user_text": user_text,
|
| 258 |
+
"emotion": emotion_dict,
|
| 259 |
+
"inferred_needs": inferred_needs,
|
| 260 |
+
"emotion_arc": emotion_arc,
|
| 261 |
+
"response": full_response,
|
| 262 |
+
"tone": tone,
|
| 263 |
+
"has_interventions": bool(intervention_text),
|
| 264 |
+
"toolbox_log": toolbox_log_str,
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
async def close(self):
|
| 268 |
+
"""Close all server connections"""
|
| 269 |
+
await self.mux.close()
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
# Global instance (lazily created singleton; see get_orchestrator)
_orchestrator = None


async def get_orchestrator() -> GhostMaloneMux:
    """Get or create the global orchestrator.

    The instance is only published to the module-level global *after*
    ``initialize()`` succeeds. The previous version assigned the global
    first, so a failed initialization permanently cached a broken,
    half-connected orchestrator, and a concurrently-awaiting caller could
    observe an uninitialized instance.

    Returns:
        The shared, fully initialized GhostMaloneMux.

    Raises:
        Whatever ``GhostMaloneMux.initialize()`` raises on connection
        failure; in that case the global stays None and the next call
        retries from scratch.
    """
    global _orchestrator
    if _orchestrator is None:
        candidate = GhostMaloneMux()
        await candidate.initialize()
        # Publish only after successful initialization.
        _orchestrator = candidate
    return _orchestrator
|
utils/reflection.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# utils/reflection.py
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
def reflect(user_msg: str, context_messages=None) -> str:
    """Return Ghost Malone's reply. Uses OpenAI if OPENAI_API_KEY is set; else echoes.

    Args:
        user_msg: The latest user message.
        context_messages: Optional prior chat messages (role/content dicts);
            only the last six (three exchanges) are forwarded to the model.

    Returns:
        The model's reply, or a dev-mode echo/error string on missing key
        or any API failure (this function never raises).
    """
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        # No credentials configured — echo so local dev still works.
        return f"👻 (dev) I hear you: {user_msg}"

    system_prompt = (
        "You are Ghost Malone — a calm, humorous listener. "
        "Be sincere, brief (<80 words), and reflective."
    )

    try:
        from openai import OpenAI

        # Assemble: system prompt, then the tail of history, then the new message.
        messages = [{"role": "system", "content": system_prompt}]
        if context_messages:
            messages += list(context_messages[-6:])  # last 3 exchanges (user/assistant)
        messages.append({"role": "user", "content": user_msg})

        completion = OpenAI(api_key=api_key).chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            max_tokens=200,
        )
        return completion.choices[0].message.content
    except Exception as e:
        # Best-effort fallback: surface the error but keep the conversation alive.
        return f"👻 (dev) Error talking to model: {e}\nBut I still hear you: {user_msg}"
|