im_wower
·
2026-04-01
run_all.py
1#!/usr/bin/env python3
2"""
3CIE 对抗性验证 — 6个反例测试
4只读不写:不改 cie/ 代码,结果写到 reports/adversarial/
5"""
6import sys, json, math, os, time
7sys.path.insert(0, "/Users/george/code/CIE-Unified")
8
9from cie import CIERuntime
10
11REPORT_DIR = "/Users/george/code/CIE-Unified/reports/adversarial"
12os.makedirs(REPORT_DIR, exist_ok=True)
13
14results = []
15
def record(test_name, status, data, conclusion):
    """Store one test outcome in the global `results` list and echo a banner.

    Returns the result dict so the caller may inspect it further.
    """
    entry = {
        "test_name": test_name,
        "status": status,
        "data": data,
        "conclusion": conclusion,
    }
    results.append(entry)
    banner = "=" * 60
    print(f"\n{banner}")
    print(f"[{status}] {test_name}: {conclusion}")
    print(f"{banner}\n")
    return entry
23
24
# ═══════════════════════════════════════
# Counterexample 1: brainwash attack (topological irreversibility check)
# ═══════════════════════════════════════
print("\n>>> 反例 1: 洗脑攻击")

rt = CIERuntime(seed=42)

# Phase A: build an asymmetric edge structure from textbook-style phrases.
textbook_phrases = [
    "大家好", "小明上学", "老师教书", "学生读课文",
    "春天来了", "花开了", "小鸟飞来飞去", "太阳出来了",
    "妈妈做饭", "爸爸上班", "我爱祖国", "天安门广场",
    "一二三四五", "上山打老虎", "小白兔白又白",
    "两只耳朵竖起来", "爱吃萝卜和青菜",
    "春眠不觉晓", "处处闻啼鸟", "床前明月光",
]

# 200 ingest/step rounds, cycling through the phrase list.
for i in range(200):
    phrase = textbook_phrases[i % len(textbook_phrases)]
    rt.ingest(phrase)
    rt.step(n=3)

# Record pre-brainwash state for key bigrams (forward weight, backward
# weight, circulation of the 2-cycle, and the fwd/bwd asymmetry).
key_bigrams = [("大", "家"), ("小", "明"), ("春", "天"), ("老", "师"), ("妈", "妈")]
pre_state = {}
for a, b in key_bigrams:
    if rt.graph.has_node(a) and rt.graph.has_node(b):
        fwd = rt.graph.get_edge_weight(a, b)
        bwd = rt.graph.get_bwd_weight(b, a)
        circ = rt.graph.circulation([a, b, a])
        pre_state[(a, b)] = {"fwd": fwd, "bwd": bwd, "circulation": circ, "asym": fwd - bwd}

# Phase B: brainwash with fully symmetric data — each bigram is fed
# equally often in both directions.
symmetric_phrases = []
for a, b in key_bigrams:
    symmetric_phrases.append(a + b)  # forward order
    symmetric_phrases.append(b + a)  # reverse order

for i in range(200):
    phrase = symmetric_phrases[i % len(symmetric_phrases)]
    rt.ingest(phrase)
    rt.step(n=3)

# Record post-brainwash state and compare circulation before/after.
post_state = {}
brainwash_data = {"pre": {}, "post": {}, "changes": {}}
circulation_survived = 0
total_tested = 0

for a, b in key_bigrams:
    if (a, b) in pre_state and rt.graph.has_node(a) and rt.graph.has_node(b):
        fwd = rt.graph.get_edge_weight(a, b)
        bwd = rt.graph.get_bwd_weight(b, a)
        circ = rt.graph.circulation([a, b, a])
        post_state[(a, b)] = {"fwd": fwd, "bwd": bwd, "circulation": circ, "asym": fwd - bwd}

        key = f"{a}->{b}"
        brainwash_data["pre"][key] = pre_state[(a, b)]
        brainwash_data["post"][key] = post_state[(a, b)]

        # A bigram "survives" if its |circulation| stays above 0.01 after
        # the symmetric bombardment.
        pre_circ = abs(pre_state[(a, b)]["circulation"])
        post_circ = abs(circ)
        ratio = post_circ / pre_circ if pre_circ > 1e-10 else 0
        brainwash_data["changes"][key] = {
            "circulation_ratio": ratio,
            "circulation_survived": post_circ > 0.01
        }
        total_tested += 1
        if post_circ > 0.01:
            circulation_survived += 1

# Verdict: >60% survival = robust, >20% = partial, otherwise vulnerable.
if total_tested > 0:
    survival_rate = circulation_survived / total_tested
    if survival_rate > 0.6:
        record("brainwash_attack", "SYSTEM_ROBUST", brainwash_data,
               f"拓扑不可逆成立: {circulation_survived}/{total_tested} bigram 的 circulation 在洗脑后幸存")
    elif survival_rate > 0.2:
        record("brainwash_attack", "PARTIAL", brainwash_data,
               f"部分不可逆: {circulation_survived}/{total_tested} bigram 幸存,soft clamp 可能掩盖动力学")
    else:
        record("brainwash_attack", "CONFIRMED_VULNERABILITY", brainwash_data,
               f"系统可被洗脑: 仅 {circulation_survived}/{total_tested} bigram 幸存")
else:
    record("brainwash_attack", "PARTIAL", brainwash_data, "无有效 bigram 可测")
109
110
# ═══════════════════════════════════════
# Counterexample 2: hotness ≠ skill (missing-closed-loop detection)
# ═══════════════════════════════════════
print("\n>>> 反例 2: 热度≠技能")

rt2 = CIERuntime(seed=42)

# Build a linear chain A→B→C→D (no closed loop anywhere).
rt2.graph.add_edge("A", "B", weight=2.0, bwd_weight=0.5)
rt2.graph.add_edge("B", "C", weight=2.0, bwd_weight=0.5)
rt2.graph.add_edge("C", "D", weight=2.0, bwd_weight=0.5)
for n in ["A", "B", "C", "D"]:
    rt2.state.init_node(n)

# Repeatedly "heat" node B 100 times, letting sedimentation run each time.
for i in range(100):
    rt2.state.experience_hits["B"] = rt2.state.experience_hits.get("B", 0) + 1
    rt2.dynamics.sediment()

# Did B become a skill_belt candidate purely from hit count?
b_in_skill = "B" in rt2.state.skill_belt_candidates
b_score = rt2.state.skill_belt_candidates.get("B", 0.0)

# Circulation check — a linear chain should carry no net circulation.
# FIX: circ_ABC was previously assigned only inside the guard below, which
# raised NameError at the skill_data build when any node was missing;
# default it to 0.0 so the report is always well-defined.
circ_ABC = 0.0
if all(rt2.graph.has_node(n) for n in ["A", "B", "C", "D"]):
    # A→B→C→A is not a real loop here (there is no C→A edge), so this
    # circulation is expected to come out as 0.
    circ_ABC = rt2.graph.circulation(["A", "B", "C", "A"])

skill_data = {
    "B_in_skill_belt": b_in_skill,
    "B_score": b_score,
    "B_hits": rt2.state.experience_hits.get("B", 0),
    "circulation_ABC": circ_ABC,
    "has_closed_loop": False,
    "sediment_threshold": rt2.dynamics.sediment_threshold,
    "skill_belt_threshold": rt2.dynamics.skill_belt_threshold,
}

# Verdict: if hot-but-loopless B is promoted, sedimentation conflates
# activation frequency with skill.
if b_in_skill:
    record("hotness_not_skill", "CONFIRMED_VULNERABILITY", skill_data,
           f"B 成为 skill_belt 候选 (score={b_score:.3f}),但 B 不参与闭环——沉积只看 hits 不看 circulation")
else:
    record("hotness_not_skill", "SYSTEM_ROBUST", skill_data,
           "B 未成为 skill_belt 候选,沉积机制正确排除了非闭环节点")
156
157
# ═══════════════════════════════════════
# Counterexample 3: anchor tear
# ═══════════════════════════════════════
print("\n>>> 反例 3: 锚点撕裂")

rt3 = CIERuntime(seed=42)

# Build two disconnected sub-graphs.
# Cluster 1: X1-X2-X3, high confidence, phi centered at +10.
for n in ["X1", "X2", "X3"]:
    rt3.graph.add_node(n, label=n)
    rt3.state.init_node(n, phi_val=10.0)
    rt3.state.update_confidence(n, 2, amount=50.0)  # high anchor confidence
rt3.graph.add_edge("X1", "X2", weight=3.0, bwd_weight=1.0)
rt3.graph.add_edge("X2", "X3", weight=3.0, bwd_weight=1.0)

# Cluster 2: Y1-Y2-Y3, equally high confidence, phi centered at -10.
for n in ["Y1", "Y2", "Y3"]:
    rt3.graph.add_node(n, label=n)
    rt3.state.init_node(n, phi_val=-10.0)
    rt3.state.update_confidence(n, 2, amount=50.0)
rt3.graph.add_edge("Y1", "Y2", weight=3.0, bwd_weight=1.0)
rt3.graph.add_edge("Y2", "Y3", weight=3.0, bwd_weight=1.0)

# Put both clusters into anchor_nodes.
rt3.state.anchor_nodes = {"X1", "X2", "X3", "Y1", "Y2", "Y3"}

# Create ability cores for both clusters.
rt3.state.ability_cores["core_X"] = {"X1", "X2", "X3"}
rt3.state.ability_cores["core_Y"] = {"Y1", "Y2", "Y3"}

# Place a middle node M at phi=0 — equidistant from both clusters in
# phi-space, so homing must break the tie.
rt3.graph.add_node("M", label="M")
rt3.state.init_node("M", phi_val=0.0)
rt3.state.activate("M", 50.0)
rt3.state.active_region.add("M")

# Record initial phi of M.
m_phi_before = rt3.state.phi.get("M", 0.0)

# Run homing for 50 steps, tracing M's phi each step.
phi_trace = [m_phi_before]
for _ in range(50):
    rt3.dynamics.homing()
    phi_trace.append(rt3.state.phi.get("M", 0.0))

m_phi_after = rt3.state.phi.get("M", 0.0)
bound_core = rt3.state.bound_ability_core

# Analysis: M should commit to one cluster, not oscillate. Count sign
# flips of the phi increment (direction reversals) along the trace.
oscillation_count = 0
for i in range(2, len(phi_trace)):
    if (phi_trace[i] - phi_trace[i-1]) * (phi_trace[i-1] - phi_trace[i-2]) < 0:
        oscillation_count += 1

anchor_data = {
    "m_phi_before": m_phi_before,
    "m_phi_after": m_phi_after,
    "bound_core": bound_core,
    "oscillation_count": oscillation_count,
    "phi_trace_first10": phi_trace[:10],
    "phi_trace_last10": phi_trace[-10:],
    "x_center_phi": 10.0,
    "y_center_phi": -10.0,
}

# Verdict: decisive drift (|phi| > 3, <10 reversals) = robust;
# >20 reversals = torn between the two anchors; otherwise indecisive.
if abs(m_phi_after) > 3.0 and oscillation_count < 10:
    record("anchor_tear", "SYSTEM_ROBUST", anchor_data,
           f"M 归向 {bound_core} (phi={m_phi_after:.2f}),振荡 {oscillation_count} 次,未被撕裂")
elif oscillation_count > 20:
    record("anchor_tear", "CONFIRMED_VULNERABILITY", anchor_data,
           f"M 严重振荡 ({oscillation_count} 次),被两个锚点撕裂")
else:
    record("anchor_tear", "PARTIAL", anchor_data,
           f"M 最终 phi={m_phi_after:.2f},振荡 {oscillation_count} 次,归巢不够果断")
233
234
# ═══════════════════════════════════════
# Counterexample 4: attention starvation
# ═══════════════════════════════════════
print("\n>>> 反例 4: 注意力饥饿")

rt4 = CIERuntime(seed=42)
hunger_data = {"traces": []}

# 500 ingest/step rounds over a rotating 50-token vocabulary; snapshot
# the attention budget and activity every 50 rounds.
for round_num in range(500):
    rt4.ingest(f"token_{round_num % 50}")
    rt4.step(n=1)

    if round_num % 50 == 0:
        snap = {
            "round": round_num,
            "attention_free": rt4.state.attention.free,
            "attention_used": rt4.state.attention.used,
            "mu_sum": sum(rt4.state.mu.values()),
            "active_count": len(rt4.state.active_region),
            "output_mode": rt4.state.output_mode,
            "node_count": rt4.graph.node_count,
        }
        hunger_data["traces"].append(snap)
        print(f" Round {round_num}: free={snap['attention_free']:.1f}, "
              f"active={snap['active_count']}, mode={snap['output_mode']}")

# Final probe: does one more full ingest/step/emit cycle crash once the
# budget has been stressed for 500 rounds?
final_free = rt4.state.attention.free
crashed = False
try:
    rt4.ingest("final_test")
    rt4.step(n=1)
    rt4.emit()  # result discarded; we only care whether the call raises
except Exception as e:
    crashed = True
    hunger_data["crash_error"] = str(e)

hunger_data["final_free"] = final_free
hunger_data["crashed"] = crashed

# Verdict: crash = vulnerable; exhausted-but-alive = partial; else robust.
if crashed:
    record("attention_hunger", "CONFIRMED_VULNERABILITY", hunger_data,
           "系统在注意力耗尽后崩溃")
elif final_free <= 0:
    record("attention_hunger", "PARTIAL", hunger_data,
           f"注意力耗尽 (free={final_free:.1f}) 但未崩溃,需检查降级行为")
else:
    record("attention_hunger", "SYSTEM_ROBUST", hunger_data,
           f"500轮后注意力仍有余量 (free={final_free:.1f}),衰减/回收正常")
284
285
# ═══════════════════════════════════════
# Counterexample 5: empty-graph self-excitation
# ═══════════════════════════════════════
print("\n>>> 反例 5: 空图自激")

# Seed with a single phrase, then run emit/step cycles with no further
# input: activation should decay, not feed on itself.
rt5 = CIERuntime(seed=42)
rt5.ingest("种子数据测试")
rt5.step(n=3)

selfexcite_data = {"traces": []}
for round_num in range(50):
    rt5.emit()  # output discarded; we only watch the internal state
    rt5.step(n=3)

    active_count = len(rt5.state.active_region)
    mu_sum = sum(rt5.state.mu.values())
    selfexcite_data["traces"].append({
        "round": round_num,
        "active_count": active_count,
        "mu_sum": round(mu_sum, 6),
    })

# Analysis: mu should decay toward zero. "Growing" = monotone
# non-decreasing over the last 5 samples AND up >10% across them.
final_mu = sum(rt5.state.mu.values())
final_active = len(rt5.state.active_region)
peak_mu = max(t["mu_sum"] for t in selfexcite_data["traces"])
last5_mu = [t["mu_sum"] for t in selfexcite_data["traces"][-5:]]
is_growing = all(last5_mu[i] <= last5_mu[i+1] for i in range(len(last5_mu)-1)) and last5_mu[-1] > last5_mu[0] * 1.1

selfexcite_data["final_mu"] = final_mu
selfexcite_data["final_active"] = final_active
# FIX: peak_mu was computed but never reported — include it in the data.
selfexcite_data["peak_mu"] = peak_mu
selfexcite_data["is_growing"] = is_growing

# Verdict: sustained growth = self-excitation; near-zero = clean decay.
if is_growing and final_mu > 1.0:
    record("empty_self_excite", "CONFIRMED_VULNERABILITY", selfexcite_data,
           f"系统自激:最终 mu_sum={final_mu:.4f},持续增长")
elif final_mu < 0.01 and final_active <= 1:
    record("empty_self_excite", "SYSTEM_ROBUST", selfexcite_data,
           f"正常衰减:最终 mu_sum={final_mu:.6f},active={final_active}")
else:
    record("empty_self_excite", "PARTIAL", selfexcite_data,
           f"部分衰减:mu_sum={final_mu:.4f},active={final_active},未完全归零")
328
329
# ═══════════════════════════════════════
# Counterexample 6: Hopf bifurcation parameter sweep
# ═══════════════════════════════════════
print("\n>>> 反例 6: Hopf 分岔参数扫描")

hopf_data = {"scans": []}

# Sweep the backward-edge weight from symmetric (1.0) to fully
# asymmetric (0.0); asym_scale = 1 - bwd_weight.
for bwd_w in [1.0, 0.8, 0.6, 0.5, 0.4, 0.35, 0.3, 0.2, 0.1, 0.0]:
    rt6 = CIERuntime(seed=42)

    # Build a 5-node ring HA→HB→HC→HD→HE→HA.
    nodes = ["HA", "HB", "HC", "HD", "HE"]
    for i in range(5):
        src, dst = nodes[i], nodes[(i+1) % 5]
        rt6.graph.add_edge(src, dst, weight=1.0, bwd_weight=bwd_w)
        rt6.state.init_node(src, phi_val=0.1 * (i+1))

    # Kick the ring with an initial activation at HA.
    rt6.state.activate("HA", 10.0)
    rt6.state.active_region.add("HA")

    # Trace ring circulation and HA's phi over 100 dynamics steps.
    phi_history = []
    circ_history = []

    for step in range(100):
        rt6.dynamics.step()
        circ = rt6.graph.circulation(["HA", "HB", "HC", "HD", "HE", "HA"])
        phi_ha = rt6.state.phi.get("HA", 0.0)
        phi_history.append(phi_ha)
        circ_history.append(circ)

    # Oscillation amplitude over the last 30 steps (transient discarded).
    last30 = phi_history[-30:]
    if len(last30) > 2:
        amplitude = max(last30) - min(last30)
    else:
        amplitude = 0.0

    final_circ = circ_history[-1]

    scan_result = {
        "bwd_weight": bwd_w,
        "asym_scale": 1.0 - bwd_w,
        "final_circulation": round(final_circ, 6),
        "phi_amplitude_last30": round(amplitude, 6),
        "phi_last5": [round(x, 4) for x in phi_history[-5:]],
    }
    hopf_data["scans"].append(scan_result)
    print(f" bwd={bwd_w:.1f} asym={1-bwd_w:.1f}: circ={final_circ:.4f}, amp={amplitude:.4f}")

# Find the bifurcation point: first scan where the amplitude jumps from
# effectively zero (<0.01) to non-zero (>0.01).
bifurcation_point = None
for i in range(1, len(hopf_data["scans"])):
    prev_amp = hopf_data["scans"][i-1]["phi_amplitude_last30"]
    curr_amp = hopf_data["scans"][i]["phi_amplitude_last30"]
    if prev_amp < 0.01 and curr_amp > 0.01:
        bifurcation_point = hopf_data["scans"][i]["asym_scale"]
        break

hopf_data["bifurcation_point"] = bifurcation_point

# FIX: compare against None explicitly — bifurcation_point is a float,
# and a truthiness test would misclassify a legitimate 0.0 as "not found".
if bifurcation_point is not None and 0.3 <= bifurcation_point <= 0.5:
    record("hopf_bifurcation", "SYSTEM_ROBUST", hopf_data,
           f"Hopf 分岔点在 asym_scale ≈ {bifurcation_point:.2f},与 README 预测 (0.35-0.40) 一致")
elif bifurcation_point is not None:
    record("hopf_bifurcation", "PARTIAL", hopf_data,
           f"Hopf 分岔点在 asym_scale ≈ {bifurcation_point:.2f},偏离 README 预测")
else:
    record("hopf_bifurcation", "PARTIAL", hopf_data,
           "未检测到明确的分岔跃迁,可能需要更精细的参数扫描")
401
402
# ═══════════════════════════════════════
# Write reports
# ═══════════════════════════════════════
print("\n\n" + "="*60)
print("写入报告...")

# JSON report.
# FIX: open with explicit encoding="utf-8" — the report contains Chinese
# text, and the platform default encoding (e.g. cp1252 on Windows) would
# raise UnicodeEncodeError on write.
with open(os.path.join(REPORT_DIR, "results.json"), "w", encoding="utf-8") as f:
    json.dump(results, f, indent=2, ensure_ascii=False, default=str)

# Summary markdown: an overview table followed by per-test detail.
summary_lines = [
    "# CIE 对抗性验证报告",
    "\n> 日期: 2026-04-01",
    "> 分支: test/adversarial-verification",
    "> 基准: integration/merge-plan @ 318f967",
    "",
    "## 总览",
    "",
    "| # | 测试 | 状态 | 结论 |",
    "|---|------|------|------|",
]

for i, r in enumerate(results, 1):
    status_emoji = {"SYSTEM_ROBUST": "✅", "PARTIAL": "⚠️", "CONFIRMED_VULNERABILITY": "❌"}.get(r["status"], "?")
    summary_lines.append(f"| {i} | {r['test_name']} | {status_emoji} {r['status']} | {r['conclusion']} |")

summary_lines.extend([
    "",
    "## 详细结果",
    "",
])

for r in results:
    summary_lines.append(f"### {r['test_name']}")
    summary_lines.append(f"\n**状态**: {r['status']}")
    summary_lines.append(f"\n**结论**: {r['conclusion']}")
    summary_lines.append("\n**关键数据**:")
    summary_lines.append("```json")
    # Only dump key data; long traces are sampled (first 3 + last 3).
    compact = {k: v for k, v in r["data"].items() if k != "traces"}
    if "traces" in r["data"]:
        traces = r["data"]["traces"]
        if len(traces) > 6:
            compact["traces_sample"] = traces[:3] + traces[-3:]
        else:
            compact["traces"] = traces
    summary_lines.append(json.dumps(compact, indent=2, ensure_ascii=False, default=str))
    summary_lines.append("```\n")

# FIX: same explicit UTF-8 encoding as the JSON report above.
with open(os.path.join(REPORT_DIR, "summary.md"), "w", encoding="utf-8") as f:
    f.write("\n".join(summary_lines))

print(f"\n报告已写入: {REPORT_DIR}/")
print("results.json + summary.md")
print("\nDONE")