CIE-Unified

git clone <repository-url>

commit
70cfdf3
parent
fabbb69
author
codex@macbookpro
date
2026-04-01 11:21:57 +0800 CST
integration phase2.3: validation/context/attention sanity cleanup
6 files changed, +414, -114
M MERGE_PLAN.md
+28, -6
 1@@ -4,11 +4,11 @@
 2 > 决策依据: Branch A 6/8, Branch B 8/8 (同一 cie-datasets formal dataset)
 3 > 策略: 方案 C — 核取 B,壳取 A
 4 >
 5-> 审核更新(2026-04-01):
 6-> `origin/integration/merge-plan @ 423d812` 已落地到 Phase 2,
 7-> 但 **Phase 3 前必须暂停**,先通过 Phase 2.1 gate:
 8-> PendingSignal 单路径收口、移除旧直接回灌路径、补 exactly-once 回归测试、
 9-> 并明确 pytest gate 与 formal validation script gate 的区别。
10+> 状态更新(2026-04-01):
11+> `origin/integration/merge-plan @ fabbb69` 已完成 Phase 2.2(Kernel Sanity Gate);
12+> `integration/phase2.3-sanity-cleanup` 已完成 Phase 2.3(validation/context/attention sanity cleanup),
13+> 并通过 quick gate、Phase 2.3 gate、broader regression gate、formal validation smoke。
14+> **Phase 3 尚未开始**;只有在 Phase 2.3 gates 全通过后,才允许进入 Phase 3。
15 
16 ---
17 
18@@ -136,7 +136,29 @@ reset_session()
19    `python3 tests/formal_validation.py`
20 6. 只有 Phase 2.1 通过,后续实现才允许进入 Phase 3
21 
22-### Phase 3: 沉积 Profile 并行观测(Day 2)
23+### Phase 2.2: Kernel Sanity Gate(已完成)
24+
25+1. 修正 Branch B 内核中 backward-weight 读取方向
26+2. 确认 Dirichlet per-node wiring 不再退化成均匀先验占位
27+3. 确认 context 参数已被 runtime 真实消费
28+4. 补 attention ledger 的基础 sanity gate
29+5. 通过 gate:`python3 -m pytest tests/test_kernel_sanity.py -q`
30+
31+### Phase 2.3: Validation / Context / Attention Sanity Cleanup(已完成)
32+
33+1. 修正 validation/test 层残留的 stale `get_bwd_weight(...)` 调用顺序
34+2. 明确 context 语义:
35+   `str` 按字符拆分;`list/tuple` 逐项消费;`dict` 稳定归一化;不支持类型显式报错
36+3. 加强 attention ledger gate,覆盖 multi-step / decay / feedback / reset cleanup 场景
37+4. 将 `tests/test_comprehensive.py` 与 `tests/formal_validation.py` 改成 repo-relative / env-aware 路径解析
38+5. 通过 gate:
39+   `python3 -m pytest tests/test_smoke.py tests/test_dynamics.py tests/test_exactly_once.py tests/test_kernel_sanity.py -q`
40+   `python3 -m pytest tests/test_phase23_validation_sanity.py -q`
41+   `python3 -m pytest tests/test_smoke.py tests/test_dynamics.py tests/test_exactly_once.py tests/test_kernel_sanity.py tests/test_comprehensive.py tests/test_phase23_validation_sanity.py -q`
42+   `python3 tests/formal_validation.py`
43+6. Phase 2.3 完成后,Phase 3 才允许开始;当前 **Phase 3 仍未开始**
44+
45+### Phase 3: 沉积 Profile 并行观测(未开始,Day 2)
46 
47 1. 从 Branch A 移植 `SedimentationProfile` dataclass 到 `cie/state.py`
48 2. 保留 Branch B 的 `experience_hits` 作为已验证行为基线;先把 Profile 做成 dual-write / parallel observation
M cie/dynamics.py
+39, -11
 1@@ -97,26 +97,33 @@ class Dynamics:
 2 
 3             spread_ratio = 0.3  # 每步传播 30% 的激活
 4             spread_amount = mu_v * spread_ratio
 5+            released = self.state.attention.release(node_id, spread_amount)
 6+            if released <= 1e-10:
 7+                continue
 8+            moved = 0.0
 9 
10             for nb in neighbors:
11                 w = self.graph.get_edge_weight(node_id, nb)
12-                flow = spread_amount * (w / total_weight)
13-                new_mu[nb] = new_mu.get(nb, 0.0) + flow
14+                desired_flow = released * (w / total_weight)
15+                actual_flow = self.state.attention.allocate(nb, desired_flow)
16+                if actual_flow <= 1e-10:
17+                    continue
18+                new_mu[nb] = new_mu.get(nb, 0.0) + actual_flow
19                 # 更新边流 J
20                 self.state.J[(node_id, nb)] = (
21-                    self.state.J.get((node_id, nb), 0.0) * 0.9 + flow
22+                    self.state.J.get((node_id, nb), 0.0) * 0.9 + actual_flow
23                 )
24-                # attention accounting: 传播到邻居的激活也要记账
25-                self.state.attention.allocate(nb, flow)
26+                moved += actual_flow
27                 # 记录经验命中
28                 self.state.experience_hits[nb] = (
29                     self.state.experience_hits.get(nb, 0) + 1
30                 )
31                 self.state.active_region.add(nb)
32 
33-            new_mu[node_id] = mu_v - spread_amount
34-            # attention accounting: 释放从源节点流出的激活
35-            self.state.attention.release(node_id, spread_amount)
36+            leftover = max(0.0, released - moved)
37+            if leftover > 1e-10:
38+                self.state.attention.allocate(node_id, leftover)
39+            new_mu[node_id] = max(0.0, new_mu.get(node_id, mu_v) - moved)
40 
41         self.state.mu.update(new_mu)
42         # 清理极低激活
43@@ -250,9 +257,30 @@ class Dynamics:
44 
45             # 归巢也微弱影响 mu:向能力核方向的节点获得微量激活
46             if nearest_core and nearest_core in self.state.ability_cores:
47-                for cn in list(self.state.ability_cores[nearest_core])[:3]:
48-                    if cn != node_id:
49-                        self.state.mu[cn] = self.state.mu.get(cn, 0.0) + abs(pull1) * 0.01
50+                targets = [cn for cn in list(self.state.ability_cores[nearest_core])[:3] if cn != node_id]
51+                if targets:
52+                    desired_total = min(
53+                        self.state.mu.get(node_id, 0.0),
54+                        abs(pull1) * 0.01 * len(targets),
55+                    )
56+                    released = self.state.attention.release(node_id, desired_total)
57+                    moved = 0.0
58+                    if released > 1e-10:
59+                        per_target = released / len(targets)
60+                        for cn in targets:
61+                            actual = self.state.attention.allocate(cn, per_target)
62+                            if actual <= 1e-10:
63+                                continue
64+                            self.state.mu[cn] = self.state.mu.get(cn, 0.0) + actual
65+                            self.state.active_region.add(cn)
66+                            moved += actual
67+                        leftover = max(0.0, released - moved)
68+                        if leftover > 1e-10:
69+                            self.state.attention.allocate(node_id, leftover)
70+                        self.state.mu[node_id] = max(
71+                            0.0,
72+                            self.state.mu.get(node_id, 0.0) - moved,
73+                        )
74 
75             if nearest_core:
76                 self.state.bound_ability_core = nearest_core
M cie/runtime.py
+48, -13
 1@@ -6,7 +6,6 @@ CIE Runtime — 统一接口(SPEC §5)
 2 """
 3 
 4 import random
 5-import math
 6 from typing import Optional
 7 from .graph import Graph
 8 from .state import CIEState, PendingSignal
 9@@ -40,13 +39,14 @@ class CIERuntime:
10         """
11         接收新的输入。创建 PendingSignal 排队,在下一步 step 时消费。
12         PendingSignal 是唯一的状态写入入口。
13+
14+        context 语义在 Phase 2.3 明确化:
15+        - str: 按字符拆成 context tokens
16+        - list/tuple: 逐项转成 token
17+        - dict: 按 key 排序后稳定归一化为 "key=value" token
18+        - 其他非 None 类型: 显式报 TypeError,避免静默忽略
19         """
20-        if isinstance(input_data, str):
21-            tokens = list(input_data)
22-        elif isinstance(input_data, (list, tuple)):
23-            tokens = list(input_data)
24-        else:
25-            tokens = [str(input_data)]
26+        tokens = self._normalize_input_tokens(input_data)
27 
28         anchor_tokens = []
29         if anchors:
30@@ -55,12 +55,7 @@ class CIERuntime:
31             elif isinstance(anchors, (list, tuple)):
32                 anchor_tokens = list(anchors)
33 
34-        context_tokens = []
35-        if context:
36-            if isinstance(context, str):
37-                context_tokens = list(context)[:10]
38-            elif isinstance(context, (list, tuple)):
39-                context_tokens = list(context)[:10]
40+        context_tokens = self._normalize_context_tokens(context)
41 
42         signal = PendingSignal(
43             source="external",
44@@ -72,6 +67,46 @@ class CIERuntime:
45         )
46         self.state.pending_signals.append(signal)
47 
48+    def _normalize_input_tokens(self, input_data):
49+        if isinstance(input_data, str):
50+            return list(input_data)
51+        if isinstance(input_data, (list, tuple)):
52+            return list(input_data)
53+        return [str(input_data)]
54+
55+    def _normalize_context_tokens(self, context):
56+        if context is None:
57+            return []
58+        if isinstance(context, str):
59+            return list(context)[:10]
60+        if isinstance(context, (list, tuple)):
61+            return [str(item) for item in list(context)[:10]]
62+        if isinstance(context, dict):
63+            tokens = []
64+            for key in sorted(context.keys(), key=lambda item: str(item)):
65+                value = context[key]
66+                if isinstance(value, str):
67+                    normalized = value[:16]
68+                elif isinstance(value, (int, float, bool)) or value is None:
69+                    normalized = str(value)
70+                elif isinstance(value, (list, tuple)):
71+                    preview = ",".join(str(item)[:8] for item in list(value)[:3])
72+                    normalized = f"[{preview}]"
73+                elif isinstance(value, dict):
74+                    normalized = "dict"
75+                else:
76+                    normalized = type(value).__name__
77+
78+                token = f"{str(key)[:12]}={normalized}"
79+                if token:
80+                    tokens.append(token[:32])
81+                if len(tokens) >= 10:
82+                    break
83+            return tokens
84+        raise TypeError(
85+            "context must be one of: str, list, tuple, dict, or None"
86+        )
87+
88     def step(self, n: int = 1):
89         """
90         推进 n 步内部动力学演化。
M tests/formal_validation.py
+151, -80
  1@@ -1,88 +1,159 @@
  2-import sys, os, json, math, time
  3-sys.path.insert(0, "/Users/george/code/CIE-Unified")
  4+import json
  5+import math
  6+import os
  7+import sys
  8+import time
  9+from pathlib import Path
 10+
 11+REPO_ROOT = Path(__file__).resolve().parents[1]
 12+if str(REPO_ROOT) not in sys.path:
 13+    sys.path.insert(0, str(REPO_ROOT))
 14+
 15 from cie import CIERuntime
 16 
 17-DS = "/Users/george/code/cie-datasets/china_text_book_md/v2026-03-28"
 18 
 19-def load_recs(stage, subject, n=80):
 20-    path = os.path.join(DS, "splits", "by_stage_subject", stage, f"{subject}.jsonl")
 21+def resolve_dataset_dir() -> Path:
 22+    env_path = os.environ.get("CIE_FORMAL_DATASET_DIR")
 23+    candidates = []
 24+    if env_path:
 25+        candidates.append(Path(env_path))
 26+    candidates.append(REPO_ROOT.parent / "cie-datasets" / "china_text_book_md" / "v2026-03-28")
 27+
 28+    for candidate in candidates:
 29+        if candidate.is_dir():
 30+            return candidate
 31+
 32+    raise FileNotFoundError(
 33+        "Formal validation dataset not found. Set CIE_FORMAL_DATASET_DIR or place "
 34+        "the dataset at ../cie-datasets/china_text_book_md/v2026-03-28 relative "
 35+        "to the repo root."
 36+    )
 37+
 38+
 39+def load_recs(dataset_dir: Path, stage: str, subject: str, n: int = 80) -> list[str]:
 40+    path = dataset_dir / "splits" / "by_stage_subject" / stage / f"{subject}.jsonl"
 41     recs = []
 42-    if not os.path.exists(path): return recs
 43-    with open(path) as f:
 44+    if not path.exists():
 45+        return recs
 46+    with open(path, encoding="utf-8") as f:
 47         for line in f:
 48             rec = json.loads(line)
 49-            if not rec.get("is_content"): continue
 50-            t = rec.get("text","")
 51-            if len(t) >= 4: recs.append(t)
 52-            if len(recs) >= n: break
 53+            if not rec.get("is_content"):
 54+                continue
 55+            text = rec.get("text", "")
 56+            if len(text) >= 4:
 57+                recs.append(text)
 58+            if len(recs) >= n:
 59+                break
 60     return recs
 61 
 62-combos = [("小学","语文"),("小学","数学"),("初中","语文"),("初中","数学"),("高中","语文")]
 63-tests = []
 64-
 65-# Pipeline + Stability per combo
 66-for stage, subj in combos:
 67-    label = stage + subj
 68-    recs = load_recs(stage, subj, 80)
 69-    if not recs:
 70-        tests.append({"name": label, "status": "FAIL", "detail": "no data"})
 71-        continue
 72-    rt = CIERuntime(seed=42)
 73-    t0 = time.time()
 74-    for r in recs:
 75-        rt.ingest(r[:60])
 76-        rt.step(n=1)
 77-    elapsed = time.time() - t0
 78-    ot = rt.emit()
 79-    if ot["activated"]:
 80-        rt.commit_feedback({"correct": [ot["activated"][0]["node"]], "reward": 1.0})
 81-    snap = rt.snapshot_state()
 82-    ok = snap["phi_summary"]["count"] > 20 and abs(snap["phi_summary"]["max"]) <= 10.1
 83-    ok &= snap["attention"]["used"] <= snap["attention"]["total"] + 0.01
 84-    ok &= all(math.isfinite(v) for v in rt.state.phi.values())
 85-    
 86-    # Emergence: top words
 87-    g = rt.graph
 88-    cn_bg = []
 89-    for se in g.fwd_edges.values():
 90-        for dst, edge in se.items():
 91-            if "\u4e00" <= edge.src <= "\u9fff" and "\u4e00" <= dst <= "\u9fff":
 92-                bwd = g.get_bwd_weight(edge.src, dst)
 93-                ratio = edge.weight / bwd if bwd > 0.01 else edge.weight * 100
 94-                cn_bg.append((edge.src+dst, round(ratio,1)))
 95-    cn_bg.sort(key=lambda x: -x[1])
 96-    
 97-    d = f"n={snap['phi_summary']['count']}, e={snap['graph']['edge_count']}, phi=[{snap['phi_summary']['min']:.3f},{snap['phi_summary']['max']:.3f}], mode={ot['mode']}, t={elapsed:.1f}s, words={cn_bg[:5]}"
 98-    tests.append({"name": label, "status": "PASS" if ok else "FAIL", "detail": d})
 99-
100-# Cross-stage
101-rt2 = CIERuntime(seed=42)
102-for stage in ["小学","初中","高中"]:
103-    for r in load_recs(stage, "语文", 30):
104-        rt2.ingest(r[:50])
105-        rt2.step(n=1)
106-s2 = rt2.snapshot_state()
107-ok2 = abs(s2["phi_summary"]["max"]) <= 10.1 and s2["phi_summary"]["count"] > 30
108-tests.append({"name": "cross_stage", "status": "PASS" if ok2 else "FAIL",
109-    "detail": f"n={s2['phi_summary']['count']}, phi={s2['phi_summary']['max']:.3f}"})
110-
111-# Cross-subject
112-rt3 = CIERuntime(seed=42)
113-for subj in ["语文","数学","科学"]:
114-    for r in load_recs("小学", subj, 30):
115-        rt3.ingest(r[:50], anchors=[subj])
116-        rt3.step(n=1)
117-s3 = rt3.snapshot_state()
118-ok3 = abs(s3["phi_summary"]["max"]) <= 10.1
119-tests.append({"name": "cross_subject", "status": "PASS" if ok3 else "FAIL",
120-    "detail": f"n={s3['phi_summary']['count']}, phi={s3['phi_summary']['max']:.3f}, cores={len(rt3.state.ability_cores)}"})
121-
122-# Summary
123-passed = sum(1 for t in tests if t["status"]=="PASS")
124-failed = sum(1 for t in tests if t["status"]=="FAIL")
125-for t in tests:
126-    print(f"[{t['status']}] {t['name']}: {t['detail']}")
127-print(f"\n总计: {passed}/{len(tests)} PASS")
128-
129-with open("/tmp/formal_val_results.json", "w") as f:
130-    json.dump({"tests": tests, "summary": {"passed": passed, "failed": failed, "total": len(tests), "dataset": DS}}, f, ensure_ascii=False, indent=2, default=str)
131+
132+def run_validation(dataset_dir: Path | None = None) -> dict:
133+    dataset_dir = Path(dataset_dir) if dataset_dir is not None else resolve_dataset_dir()
134+
135+    combos = [("小学", "语文"), ("小学", "数学"), ("初中", "语文"), ("初中", "数学"), ("高中", "语文")]
136+    tests = []
137+
138+    # Pipeline + Stability per combo
139+    for stage, subj in combos:
140+        label = stage + subj
141+        recs = load_recs(dataset_dir, stage, subj, 80)
142+        if not recs:
143+            tests.append({"name": label, "status": "FAIL", "detail": "no data"})
144+            continue
145+
146+        rt = CIERuntime(seed=42)
147+        t0 = time.time()
148+        for rec in recs:
149+            rt.ingest(rec[:60])
150+            rt.step(n=1)
151+        elapsed = time.time() - t0
152+
153+        output = rt.emit()
154+        if output["activated"]:
155+            rt.commit_feedback({"correct": [output["activated"][0]["node"]], "reward": 1.0})
156+        snap = rt.snapshot_state()
157+
158+        ok = snap["phi_summary"]["count"] > 20 and abs(snap["phi_summary"]["max"]) <= 10.1
159+        ok &= snap["attention"]["used"] <= snap["attention"]["total"] + 0.01
160+        ok &= all(math.isfinite(v) for v in rt.state.phi.values())
161+
162+        g = rt.graph
163+        cn_bg = []
164+        for src_edges in g.fwd_edges.values():
165+            for dst, edge in src_edges.items():
166+                if "\u4e00" <= edge.src <= "\u9fff" and "\u4e00" <= dst <= "\u9fff":
167+                    bwd = g.get_bwd_weight(dst, edge.src)
168+                    ratio = edge.weight / bwd if bwd > 0.01 else edge.weight * 100
169+                    cn_bg.append((edge.src + dst, round(ratio, 1)))
170+        cn_bg.sort(key=lambda item: -item[1])
171+
172+        detail = (
173+            f"n={snap['phi_summary']['count']}, e={snap['graph']['edge_count']}, "
174+            f"phi=[{snap['phi_summary']['min']:.3f},{snap['phi_summary']['max']:.3f}], "
175+            f"mode={output['mode']}, t={elapsed:.1f}s, words={cn_bg[:5]}"
176+        )
177+        tests.append({"name": label, "status": "PASS" if ok else "FAIL", "detail": detail})
178+
179+    # Cross-stage
180+    rt2 = CIERuntime(seed=42)
181+    for stage in ["小学", "初中", "高中"]:
182+        for rec in load_recs(dataset_dir, stage, "语文", 30):
183+            rt2.ingest(rec[:50])
184+            rt2.step(n=1)
185+    snap2 = rt2.snapshot_state()
186+    ok2 = abs(snap2["phi_summary"]["max"]) <= 10.1 and snap2["phi_summary"]["count"] > 30
187+    tests.append({
188+        "name": "cross_stage",
189+        "status": "PASS" if ok2 else "FAIL",
190+        "detail": f"n={snap2['phi_summary']['count']}, phi={snap2['phi_summary']['max']:.3f}",
191+    })
192+
193+    # Cross-subject
194+    rt3 = CIERuntime(seed=42)
195+    for subj in ["语文", "数学", "科学"]:
196+        for rec in load_recs(dataset_dir, "小学", subj, 30):
197+            rt3.ingest(rec[:50], anchors=[subj])
198+            rt3.step(n=1)
199+    snap3 = rt3.snapshot_state()
200+    ok3 = abs(snap3["phi_summary"]["max"]) <= 10.1
201+    tests.append({
202+        "name": "cross_subject",
203+        "status": "PASS" if ok3 else "FAIL",
204+        "detail": (
205+            f"n={snap3['phi_summary']['count']}, phi={snap3['phi_summary']['max']:.3f}, "
206+            f"cores={len(rt3.state.ability_cores)}"
207+        ),
208+    })
209+
210+    passed = sum(1 for test in tests if test["status"] == "PASS")
211+    failed = sum(1 for test in tests if test["status"] == "FAIL")
212+    return {
213+        "tests": tests,
214+        "summary": {
215+            "passed": passed,
216+            "failed": failed,
217+            "total": len(tests),
218+            "dataset": str(dataset_dir),
219+        },
220+    }
221+
222+
223+def main() -> int:
224+    try:
225+        results = run_validation()
226+    except FileNotFoundError as exc:
227+        print(f"[FAIL] dataset: {exc}")
228+        return 1
229+
230+    for test in results["tests"]:
231+        print(f"[{test['status']}] {test['name']}: {test['detail']}")
232+    print(f"\n总计: {results['summary']['passed']}/{results['summary']['total']} PASS")
233+
234+    with open("/tmp/formal_val_results.json", "w", encoding="utf-8") as f:
235+        json.dump(results, f, ensure_ascii=False, indent=2, default=str)
236+    return 0 if results["summary"]["failed"] == 0 else 1
237+
238+
239+if __name__ == "__main__":
240+    sys.exit(main())
M tests/test_comprehensive.py
+20, -4
 1@@ -24,12 +24,28 @@ import time
 2 import math
 3 import json
 4 import traceback
 5+from pathlib import Path
 6 
 7-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 8+REPO_ROOT = Path(__file__).resolve().parents[1]
 9+sys.path.insert(0, str(REPO_ROOT))
10 from cie import CIERuntime
11 
12 # ── 数据路径 ──
13-DATA_DIR = "/Users/george/code/china-text-book-md"
14+def resolve_textbook_dir() -> Path:
15+    env_path = os.environ.get("CIE_TEXTBOOK_DATA_DIR")
16+    candidates = []
17+    if env_path:
18+        candidates.append(Path(env_path))
19+    candidates.append(REPO_ROOT.parent / "china-text-book-md")
20+
21+    for candidate in candidates:
22+        if candidate.is_dir():
23+            return candidate
24+
25+    raise FileNotFoundError(
26+        "Textbook dataset not found. Set CIE_TEXTBOOK_DATA_DIR or place "
27+        "the dataset at ../china-text-book-md relative to the repo root."
28+    )
29 
30 TEXTBOOKS = {
31     "小学语文一上": "小学_语文_统编版_义务教育教科书·语文一年级上册.md",
32@@ -42,7 +58,7 @@ TEXTBOOKS = {
33 
34 def load_textbook(name):
35     """加载课本,提取纯文本段落(跳过 markdown 标记和乱码)"""
36-    path = os.path.join(DATA_DIR, TEXTBOOKS[name])
37+    path = resolve_textbook_dir() / TEXTBOOKS[name]
38     with open(path, "r", encoding="utf-8") as f:
39         raw = f.read()
40     
41@@ -154,7 +170,7 @@ def test_A04_chuzhong_shuxue_formula():
42     total_edges = 0
43     for src_edges in rt.graph.fwd_edges.values():
44         for dst, edge in src_edges.items():
45-            bwd_w = rt.graph.get_bwd_weight(edge.src, dst)
46+            bwd_w = rt.graph.get_bwd_weight(dst, edge.src)
47             if abs(edge.weight - bwd_w) > 0.01:
48                 asym_count += 1
49             total_edges += 1
A tests/test_phase23_validation_sanity.py
+128, -0
  1@@ -0,0 +1,128 @@
  2+import math
  3+from importlib import import_module
  4+from pathlib import Path
  5+
  6+import pytest
  7+
  8+from cie import CIERuntime
  9+
 10+
 11+REPO_ROOT = Path(__file__).resolve().parents[1]
 12+
 13+
 14+def assert_attention_matches_mu(rt: CIERuntime, tol: float = 1e-6):
 15+    mu_positive = {
 16+        node: value for node, value in rt.state.mu.items()
 17+        if value > tol
 18+    }
 19+    attn_positive = {
 20+        node: value for node, value in rt.state.attention.allocated.items()
 21+        if value > tol
 22+    }
 23+
 24+    assert set(mu_positive) == set(attn_positive), (
 25+        f"attention/mu node mismatch: mu_only={sorted(set(mu_positive) - set(attn_positive))}, "
 26+        f"attn_only={sorted(set(attn_positive) - set(mu_positive))}"
 27+    )
 28+
 29+    for node, mu_value in mu_positive.items():
 30+        assert math.isclose(mu_value, attn_positive[node], rel_tol=1e-9, abs_tol=tol), (
 31+            f"ledger mismatch at {node!r}: mu={mu_value}, attention={attn_positive[node]}"
 32+        )
 33+
 34+    assert math.isclose(
 35+        sum(mu_positive.values()),
 36+        rt.state.attention.used,
 37+        rel_tol=1e-9,
 38+        abs_tol=tol,
 39+    ), f"total mismatch: mu={sum(mu_positive.values())}, attention={rt.state.attention.used}"
 40+
 41+
 42+def test_validation_call_sites_use_correct_backward_direction():
 43+    targets = [
 44+        REPO_ROOT / "tests" / "test_comprehensive.py",
 45+        REPO_ROOT / "tests" / "formal_validation.py",
 46+    ]
 47+
 48+    for path in targets:
 49+        text = path.read_text(encoding="utf-8")
 50+        assert "get_bwd_weight(edge.src, dst)" not in text, f"stale reversed call remains in {path.name}"
 51+        assert "get_bwd_weight(dst, edge.src)" in text, f"corrected call missing in {path.name}"
 52+
 53+
 54+def test_dict_context_is_normalized_and_consumed_deterministically():
 55+    rt = CIERuntime(seed=42)
 56+    context = {
 57+        "topic": "背景",
 58+        "stage": "小学",
 59+        "tags": ["A", "B"],
 60+        "meta": {"nested": True},
 61+    }
 62+
 63+    rt.ingest("你好", context=context)
 64+    pending = rt.state.pending_signals[-1]
 65+    assert pending.context_tokens == [
 66+        "meta=dict",
 67+        "stage=小学",
 68+        "tags=[A,B]",
 69+        "topic=背景",
 70+    ]
 71+
 72+    rt.step(n=1)
 73+    for token in pending.context_tokens:
 74+        assert rt.graph.has_node(token), f"context token missing from graph: {token}"
 75+
 76+    assert rt.graph.get_edge_weight("stage=小学", "你") > 0
 77+    assert rt.graph.get_edge_weight("topic=背景", "好") > 0
 78+
 79+
 80+def test_unsupported_context_type_fails_fast():
 81+    rt = CIERuntime(seed=42)
 82+    with pytest.raises(TypeError, match="context must be one of"):
 83+        rt.ingest("你好", context=object())
 84+
 85+
 86+def test_attention_ledger_matches_mu_across_steps_feedback_and_cleanup():
 87+    rt = CIERuntime(seed=42)
 88+
 89+    for idx in range(6):
 90+        rt.ingest(
 91+            f"轮次{idx}",
 92+            context={"stage": "小学", "round": idx},
 93+            anchors=["锚点"],
 94+        )
 95+        rt.step(n=3)
 96+        assert_attention_matches_mu(rt)
 97+
 98+    output = rt.emit()
 99+    assert output is not None
100+    rt.step(n=2)
101+    assert_attention_matches_mu(rt)
102+
103+    rt.commit_feedback({"correct": list(rt.state.active_region)[:2], "reward": 1.0})
104+    rt.step(n=2)
105+    assert_attention_matches_mu(rt)
106+
107+    rt.step(n=25)
108+    assert_attention_matches_mu(rt)
109+
110+    rt.reset_session()
111+    assert_attention_matches_mu(rt)
112+    assert rt.state.attention.used == 0.0
113+    assert all(value == 0.0 for value in rt.state.mu.values())
114+
115+
116+def test_validation_path_helpers_are_env_aware(monkeypatch, tmp_path):
117+    textbook_dir = tmp_path / "textbooks"
118+    textbook_dir.mkdir()
119+    formal_dir = tmp_path / "formal"
120+    formal_dir.mkdir()
121+
122+    monkeypatch.setenv("CIE_TEXTBOOK_DATA_DIR", str(textbook_dir))
123+    monkeypatch.setenv("CIE_FORMAL_DATASET_DIR", str(formal_dir))
124+
125+    comprehensive = import_module("tests.test_comprehensive")
126+    formal_validation = import_module("tests.formal_validation")
127+
128+    assert comprehensive.resolve_textbook_dir() == textbook_dir
129+    assert formal_validation.resolve_dataset_dir() == formal_dir