- commit
- 3e97255
- parent
- 4c794d4
- author
- codex@macbookpro
- date
- 2026-03-31 16:29:52 +0800 CST
branch-a: task01 scaffold + plan/task docs
10 files changed,
+796,
-0
+9,
-0
1@@ -0,0 +1,9 @@
2+__pycache__/
3+*.py[cod]
4+.DS_Store
5+.coverage
6+.mypy_cache/
7+.pytest_cache/
8+build/
9+dist/
10+*.egg-info/
+5,
-0
1@@ -0,0 +1,5 @@
2+"""Minimal Branch A runtime scaffold."""
3+
4+from .runtime import CIERuntime
5+
6+__all__ = ["CIERuntime"]
+31,
-0
1@@ -0,0 +1,31 @@
2+from __future__ import annotations
3+
4+from dataclasses import dataclass, field
5+from typing import Dict, Iterable, List
6+
7+
@dataclass
class Graph:
    """Small undirected weighted topology used by the runtime step loop.

    Every edge is stored twice (source->target and target->source) so that
    neighbor lookups are symmetric and O(1). Repeated ``add_edge`` calls
    accumulate weight on the existing edge.
    """

    # node -> {neighbor -> accumulated edge weight}
    adjacency: Dict[str, Dict[str, float]] = field(default_factory=dict)

    def ensure_node(self, node: str) -> None:
        """Register *node* with an empty neighbor map if it is unknown."""
        self.adjacency.setdefault(node, {})

    def add_edge(self, source: str, target: str, weight: float = 1.0) -> None:
        """Accumulate *weight* onto the undirected edge source<->target.

        Bug fix: a self-loop (source == target) previously accumulated the
        weight twice, because both symmetric writes targeted the same cell;
        it now counts exactly once.
        """
        self.ensure_node(source)
        self.ensure_node(target)
        self.adjacency[source][target] = self.adjacency[source].get(target, 0.0) + weight
        if source != target:
            self.adjacency[target][source] = self.adjacency[target].get(source, 0.0) + weight

    def connect_path(self, nodes: Iterable[str], weight: float = 1.0) -> None:
        """Chain consecutive non-empty nodes with edges of *weight*.

        Falsy entries (e.g. empty strings) are dropped before pairing.
        """
        ordered = [node for node in nodes if node]
        for left, right in zip(ordered, ordered[1:]):
            self.add_edge(left, right, weight=weight)

    def neighbors(self, node: str) -> Dict[str, float]:
        """Return the neighbor->weight map for *node* ({} if unknown)."""
        return self.adjacency.get(node, {})

    def nodes(self) -> List[str]:
        """Return all known node names in sorted order."""
        return sorted(self.adjacency)
+450,
-0
1@@ -0,0 +1,450 @@
2+from __future__ import annotations
3+
4+import math
5+import re
6+from typing import Any, Dict, Iterable, List
7+
8+from .state import PendingSignal, RuntimeState
9+
10+REQUIRED_SNAPSHOT_KEYS = {
11+ "phi_summary",
12+ "mu_summary",
13+ "J_summary",
14+ "active_region",
15+ "bound_ability_core",
16+ "anchor_pull",
17+ "drift_score",
18+ "free_capacity",
19+ "experience_regions",
20+ "skill_belt_candidates",
21+ "sedimentation_trace",
22+ "merge_events",
23+ "decay_events",
24+ "output_mode",
25+ "feedback_effect",
26+}
27+
28+STAGE_ORDER = ("memory", "experience", "skill_belt", "ability_core")
29+
30+
31+def _round(value: float) -> float:
32+ return round(value, 4)
33+
34+
class CIERuntime:
    """A small graph-native runtime centered on (phi, mu, J)."""

    def __init__(
        self,
        *,
        activation_retention: float = 0.58,
        activation_spread: float = 0.24,
        potential_decay: float = 0.97,
        flow_decay: float = 0.95,
        capacity_limit: float = 4.0,
    ) -> None:
        """Create a runtime with a fresh, empty ``RuntimeState``.

        All tuning knobs are keyword-only:

        activation_retention: fraction of a node's mu kept in place each step.
        activation_spread: fraction of a node's mu pushed to its neighbors.
        potential_decay: per-step multiplicative decay applied to phi.
        flow_decay: per-step multiplicative decay applied to J.
        capacity_limit: total mu at which free_capacity bottoms out at 0.
        """
        self.state = RuntimeState()
        self.activation_retention = activation_retention
        self.activation_spread = activation_spread
        self.potential_decay = potential_decay
        self.flow_decay = flow_decay
        self.capacity_limit = capacity_limit
54+ def ingest(self, input: Any, context: Any = None, anchors: Any = None) -> Dict[str, Any]:
55+ tokens = self._tokenize(input)
56+ context_tokens = self._tokenize(context)
57+ anchor_tokens = self._tokenize(anchors)
58+ signal = PendingSignal(
59+ source="external",
60+ tokens=tokens,
61+ context_tokens=context_tokens,
62+ anchor_tokens=anchor_tokens,
63+ strength=1.0,
64+ )
65+ self.state.pending_signals.append(signal)
66+ return {
67+ "queued_tokens": list(tokens),
68+ "queued_context": list(context_tokens),
69+ "queued_anchors": list(anchor_tokens),
70+ }
71+
72+ def step(self, n: int = 1) -> Dict[str, Any]:
73+ steps = max(1, int(n))
74+ for _ in range(steps):
75+ self._advance_once()
76+ return self.snapshot_state()
77+
78+ def emit(self) -> str:
79+ active_nodes = self._top_nodes(self.state.mu, limit=3)
80+ mode = self._choose_output_mode()
81+ self.state.output_mode = mode
82+ if not active_nodes:
83+ output = "minimal: idle"
84+ feedback_tokens = ["idle"]
85+ elif mode == "full":
86+ output = "full: " + " ".join(active_nodes)
87+ feedback_tokens = active_nodes[:]
88+ elif mode == "degraded":
89+ output = "degraded: " + " ".join(active_nodes[:2])
90+ feedback_tokens = active_nodes[:2]
91+ else:
92+ output = "minimal: " + active_nodes[0]
93+ feedback_tokens = active_nodes[:1]
94+
95+ feedback_signal = PendingSignal(
96+ source="emit",
97+ tokens=feedback_tokens,
98+ strength=0.45 if mode == "full" else 0.3,
99+ )
100+ self.state.pending_signals.append(feedback_signal)
101+ self.state.last_output = output
102+ self.state.feedback_effect = {
103+ "source": "emit",
104+ "queued_tokens": list(feedback_tokens),
105+ "queued_strength": _round(feedback_signal.strength),
106+ "queued_step": self.state.step_index,
107+ "last_applied_step": self.state.feedback_effect.get("last_applied_step"),
108+ }
109+ return output
110+
111+ def commit_feedback(self, feedback: Any) -> Dict[str, Any]:
112+ payload = self._normalize_feedback(feedback)
113+ signal = PendingSignal(
114+ source="feedback",
115+ tokens=payload["tokens"],
116+ context_tokens=payload["context_tokens"],
117+ strength=payload["strength"],
118+ polarity=payload["polarity"],
119+ )
120+ self.state.pending_signals.append(signal)
121+ self.state.feedback_effect = {
122+ "source": "commit_feedback",
123+ "queued_tokens": list(signal.tokens),
124+ "queued_strength": _round(signal.strength),
125+ "polarity": signal.polarity,
126+ "queued_step": self.state.step_index,
127+ "last_applied_step": self.state.feedback_effect.get("last_applied_step"),
128+ }
129+ return dict(self.state.feedback_effect)
130+
131+ def snapshot_state(self) -> Dict[str, Any]:
132+ snapshot = {
133+ "phi_summary": self._phi_summary(),
134+ "mu_summary": self._mu_summary(),
135+ "J_summary": self._j_summary(),
136+ "active_region": list(self.state.active_region),
137+ "bound_ability_core": self.state.bound_ability_core,
138+ "anchor_pull": _round(self.state.anchor_pull),
139+ "drift_score": _round(self.state.drift_score),
140+ "free_capacity": _round(self.state.free_capacity),
141+ "experience_regions": self._experience_regions(),
142+ "skill_belt_candidates": self._skill_belt_candidates(),
143+ "sedimentation_trace": list(self.state.sedimentation_trace),
144+ "merge_events": list(self.state.merge_events),
145+ "decay_events": list(self.state.decay_events),
146+ "output_mode": self.state.output_mode,
147+ "feedback_effect": dict(self.state.feedback_effect),
148+ }
149+ missing = REQUIRED_SNAPSHOT_KEYS.difference(snapshot)
150+ if missing:
151+ raise RuntimeError(f"Missing snapshot keys: {sorted(missing)}")
152+ return snapshot
153+
154+ def reset_session(self) -> None:
155+ self.state.mu.clear()
156+ self.state.pending_signals.clear()
157+ self.state.active_region.clear()
158+ self.state.output_mode = "minimal"
159+ self.state.last_output = ""
160+ self.state.bound_ability_core = None
161+ self.state.drift_score = 0.0
162+ self.state.anchor_pull = 0.0
163+ self.state.free_capacity = 1.0
164+ self.state.feedback_effect = {
165+ "source": "reset_session",
166+ "queued_tokens": [],
167+ "queued_strength": 0.0,
168+ "last_applied_step": self.state.step_index,
169+ }
170+
171+ def _advance_once(self) -> None:
172+ self.state.step_index += 1
173+ self.state.decay_events = []
174+ pending = list(self.state.pending_signals)
175+ self.state.pending_signals.clear()
176+ for signal in pending:
177+ self._apply_signal(signal)
178+ self._propagate_activation()
179+ self._apply_homing()
180+ self._apply_decay()
181+ self._refresh_observability()
182+
183+ def _apply_signal(self, signal: PendingSignal) -> None:
184+ combined = signal.anchor_tokens + signal.context_tokens + signal.tokens
185+ for node in combined:
186+ self.state.graph.ensure_node(node)
187+ self.state.phi.setdefault(node, 0.0)
188+ self.state.mu.setdefault(node, 0.0)
189+ self.state.strata.setdefault(node, "memory")
190+ self.state.touch_count[node] = self.state.touch_count.get(node, 0) + 1
191+ for anchor in signal.anchor_tokens:
192+ self.state.anchor_nodes[anchor] = self.state.anchor_nodes.get(anchor, 0.0) + 1.0
193+ self.state.graph.connect_path(combined, weight=max(0.5, signal.strength))
194+ for left, right in zip(combined, combined[1:]):
195+ key = (left, right)
196+ self.state.J[key] = self.state.J.get(key, 0.0) + (0.35 * signal.strength)
197+ for token in signal.tokens:
198+ activation_gain = 0.9 * signal.strength
199+ potential_gain = 0.28 * signal.strength * signal.polarity
200+ self.state.mu[token] = self.state.mu.get(token, 0.0) + activation_gain
201+ self.state.phi[token] = self.state.phi.get(token, 0.0) + potential_gain
202+ self._update_sedimentation(token)
203+ if signal.polarity < 0:
204+ self._record_decay("feedback_suppression", token, activation_gain)
205+ if signal.source in {"emit", "feedback"}:
206+ self.state.feedback_effect["last_applied_step"] = self.state.step_index
207+
208+ def _propagate_activation(self) -> None:
209+ next_mu: Dict[str, float] = {}
210+ for node, activation in list(self.state.mu.items()):
211+ if activation <= 0.0:
212+ continue
213+ retained = activation * self.activation_retention
214+ next_mu[node] = next_mu.get(node, 0.0) + retained
215+ neighbors = self.state.graph.neighbors(node)
216+ if neighbors:
217+ total_weight = sum(neighbors.values()) or 1.0
218+ for neighbor, weight in neighbors.items():
219+ spread = activation * self.activation_spread * (weight / total_weight)
220+ next_mu[neighbor] = next_mu.get(neighbor, 0.0) + spread
221+ self.state.J[(node, neighbor)] = self.state.J.get((node, neighbor), 0.0) + spread * 0.1
222+ self.state.phi[node] = self.state.phi.get(node, 0.0) + retained * 0.06
223+ self.state.mu = next_mu
224+
225+ def _apply_homing(self) -> None:
226+ core = self._select_bound_core()
227+ self.state.bound_ability_core = core
228+ if not core or not self.state.mu:
229+ self.state.anchor_pull = 0.0
230+ return
231+ anchor_factor = 0.05 if self.state.anchor_nodes else 0.02
232+ moved = 0.0
233+ for node, activation in list(self.state.mu.items()):
234+ if node == core or activation <= 0.0:
235+ continue
236+ shift = activation * anchor_factor
237+ if shift <= 0.0:
238+ continue
239+ self.state.mu[node] = max(0.0, activation - shift)
240+ self.state.mu[core] = self.state.mu.get(core, 0.0) + shift
241+ moved += shift
242+ self.state.anchor_pull = moved
243+
244+ def _apply_decay(self) -> None:
245+ for node, value in list(self.state.phi.items()):
246+ decayed = value * self.potential_decay
247+ if decayed < value - 0.01:
248+ self._record_decay("phi", node, value - decayed)
249+ if abs(decayed) < 0.015:
250+ self.state.phi.pop(node, None)
251+ continue
252+ self.state.phi[node] = decayed
253+ for node, value in list(self.state.mu.items()):
254+ decayed = value * 0.92
255+ if decayed < value - 0.01:
256+ self._record_decay("mu", node, value - decayed)
257+ if decayed < 0.05:
258+ self.state.mu.pop(node, None)
259+ continue
260+ self.state.mu[node] = decayed
261+ for edge, value in list(self.state.J.items()):
262+ decayed = value * self.flow_decay
263+ if decayed < value - 0.01:
264+ self._record_decay("J", f"{edge[0]}->{edge[1]}", value - decayed)
265+ if decayed < 0.03:
266+ self.state.J.pop(edge, None)
267+ continue
268+ self.state.J[edge] = decayed
269+
270+ def _refresh_observability(self) -> None:
271+ self.state.active_region = self._top_nodes(self.state.mu, limit=4)
272+ self.state.bound_ability_core = self._select_bound_core()
273+ self.state.drift_score = self._compute_drift_score()
274+ total_activation = sum(self.state.mu.values())
275+ self.state.free_capacity = max(0.0, 1.0 - min(total_activation / self.capacity_limit, 1.0))
276+ self.state.output_mode = self._choose_output_mode()
277+
278+ def _update_sedimentation(self, node: str) -> None:
279+ old_stage = self.state.strata.get(node, "memory")
280+ touches = self.state.touch_count.get(node, 0)
281+ phi = self.state.phi.get(node, 0.0)
282+ if touches >= 5 or phi >= 1.2:
283+ new_stage = "ability_core"
284+ elif touches >= 3 or phi >= 0.8:
285+ new_stage = "skill_belt"
286+ elif touches >= 2 or phi >= 0.3:
287+ new_stage = "experience"
288+ else:
289+ new_stage = "memory"
290+ if STAGE_ORDER.index(new_stage) > STAGE_ORDER.index(old_stage):
291+ self.state.strata[node] = new_stage
292+ event = {
293+ "step": self.state.step_index,
294+ "node": node,
295+ "from": old_stage,
296+ "to": new_stage,
297+ }
298+ self.state.sedimentation_trace.append(event)
299+ self.state.sedimentation_trace = self.state.sedimentation_trace[-12:]
300+ if new_stage == "ability_core":
301+ self.state.merge_events.append(
302+ {"step": self.state.step_index, "node": node, "event": "skill_to_core"}
303+ )
304+ self.state.merge_events = self.state.merge_events[-8:]
305+
306+ def _record_decay(self, kind: str, target: str, amount: float) -> None:
307+ self.state.decay_events.append(
308+ {
309+ "step": self.state.step_index,
310+ "kind": kind,
311+ "target": target,
312+ "amount": _round(amount),
313+ }
314+ )
315+ self.state.decay_events = self.state.decay_events[-12:]
316+
317+ def _select_bound_core(self) -> str | None:
318+ candidates = {
319+ node: self.state.phi.get(node, 0.0) + (0.4 * self.state.touch_count.get(node, 0))
320+ for node in self.state.graph.nodes()
321+ }
322+ if not candidates:
323+ return None
324+ return max(candidates, key=lambda node: (candidates[node], self.state.mu.get(node, 0.0), node))
325+
326+ def _compute_drift_score(self) -> float:
327+ active_total = sum(self.state.mu.values())
328+ if active_total <= 0.0:
329+ return 0.0
330+ core = self.state.bound_ability_core
331+ if not core:
332+ return 0.0
333+ in_core = self.state.mu.get(core, 0.0)
334+ if self.state.anchor_nodes:
335+ in_core += sum(self.state.mu.get(anchor, 0.0) for anchor in self.state.anchor_nodes)
336+ spread_penalty = 0.12 * max(0, len(self.state.mu) - 1)
337+ drift = max(0.0, 1.0 - min(in_core / active_total, 1.0) + spread_penalty)
338+ return min(drift, 1.0)
339+
340+ def _choose_output_mode(self) -> str:
341+ if not self.state.mu:
342+ return "minimal"
343+ if self.state.free_capacity < 0.2 or self.state.drift_score > 0.85:
344+ return "minimal"
345+ if self.state.free_capacity < 0.55 or self.state.drift_score > 0.45:
346+ return "degraded"
347+ return "full"
348+
349+ def _phi_summary(self) -> Dict[str, Any]:
350+ total = sum(self.state.phi.values())
351+ return {
352+ "node_count": len(self.state.phi),
353+ "total_potential": _round(total),
354+ "top_nodes": self._top_scored(self.state.phi),
355+ }
356+
357+ def _mu_summary(self) -> Dict[str, Any]:
358+ total = sum(self.state.mu.values())
359+ return {
360+ "active_count": len(self.state.mu),
361+ "total_activation": _round(total),
362+ "top_nodes": self._top_scored(self.state.mu),
363+ }
364+
365+ def _j_summary(self) -> Dict[str, Any]:
366+ total = sum(self.state.J.values())
367+ ordered = sorted(self.state.J.items(), key=lambda item: (-item[1], item[0]))
368+ top_flows = [
369+ {"edge": f"{source}->{target}", "flow": _round(flow)}
370+ for (source, target), flow in ordered[:5]
371+ ]
372+ return {
373+ "edge_count": len(self.state.J),
374+ "total_flow": _round(total),
375+ "top_flows": top_flows,
376+ }
377+
378+ def _experience_regions(self) -> List[Dict[str, Any]]:
379+ items: List[Dict[str, Any]] = []
380+ for node, stage in self.state.strata.items():
381+ if stage not in {"experience", "skill_belt", "ability_core"}:
382+ continue
383+ items.append(
384+ {
385+ "region": node,
386+ "stage": stage,
387+ "activation": _round(self.state.mu.get(node, 0.0)),
388+ }
389+ )
390+ return sorted(items, key=lambda item: (-item["activation"], item["region"]))[:6]
391+
392+ def _skill_belt_candidates(self) -> List[Dict[str, Any]]:
393+ items: List[Dict[str, Any]] = []
394+ for node in self.state.graph.nodes():
395+ stage = self.state.strata.get(node, "memory")
396+ if stage not in {"skill_belt", "ability_core"} and self.state.touch_count.get(node, 0) < 2:
397+ continue
398+ score = self.state.phi.get(node, 0.0) + (0.25 * self.state.touch_count.get(node, 0))
399+ items.append(
400+ {
401+ "node": node,
402+ "score": _round(score),
403+ "stage": stage,
404+ }
405+ )
406+ return sorted(items, key=lambda item: (-item["score"], item["node"]))[:6]
407+
408+ def _top_scored(self, values: Dict[str, float], limit: int = 5) -> List[Dict[str, Any]]:
409+ ordered = sorted(values.items(), key=lambda item: (-item[1], item[0]))
410+ return [{"node": node, "value": _round(value)} for node, value in ordered[:limit]]
411+
412+ def _top_nodes(self, values: Dict[str, float], limit: int = 5) -> List[str]:
413+ ordered = sorted(values.items(), key=lambda item: (-item[1], item[0]))
414+ return [node for node, _ in ordered[:limit]]
415+
416+ def _normalize_feedback(self, feedback: Any) -> Dict[str, Any]:
417+ if isinstance(feedback, dict):
418+ text = " ".join(
419+ str(feedback.get(key, ""))
420+ for key in ("text", "label", "note")
421+ if feedback.get(key) is not None
422+ )
423+ polarity = -1 if float(feedback.get("value", 1.0)) < 0 else 1
424+ strength = max(0.2, min(1.2, abs(float(feedback.get("value", 1.0)))))
425+ context_tokens = self._tokenize(feedback.get("context"))
426+ else:
427+ text = str(feedback)
428+ polarity = 1
429+ strength = 0.8
430+ context_tokens = []
431+ tokens = self._tokenize(text) or ["feedback"]
432+ return {
433+ "tokens": tokens,
434+ "context_tokens": context_tokens,
435+ "strength": strength,
436+ "polarity": polarity,
437+ }
438+
439+ def _tokenize(self, payload: Any) -> List[str]:
440+ if payload is None:
441+ return []
442+ if isinstance(payload, str):
443+ text = payload
444+ elif isinstance(payload, dict):
445+ text = " ".join(str(value) for value in payload.values() if value is not None)
446+ elif isinstance(payload, Iterable):
447+ text = " ".join(str(item) for item in payload if item is not None)
448+ else:
449+ text = str(payload)
450+ tokens = [token.lower() for token in re.findall(r"\w+", text, flags=re.UNICODE)]
451+ return tokens[:8]
+40,
-0
1@@ -0,0 +1,40 @@
2+from __future__ import annotations
3+
4+from dataclasses import dataclass, field
5+from typing import Dict, List, Tuple
6+
7+from .graph import Graph
8+
9+
@dataclass
class PendingSignal:
    """An un-applied input/feedback signal queued for the next runtime step."""

    source: str  # origin tag, e.g. "external", "emit", or "feedback"
    tokens: List[str]
    context_tokens: List[str] = field(default_factory=list)
    anchor_tokens: List[str] = field(default_factory=list)
    strength: float = 1.0
    polarity: int = 1  # +1 reinforces potential, -1 suppresses it
18+
19+
@dataclass
class RuntimeState:
    """Canonical runtime state centered on the (phi, mu, J) triple."""

    phi: Dict[str, float] = field(default_factory=dict)  # node -> potential
    mu: Dict[str, float] = field(default_factory=dict)  # node -> activation
    J: Dict[Tuple[str, str], float] = field(default_factory=dict)  # directed flow
    graph: Graph = field(default_factory=Graph)
    anchor_nodes: Dict[str, float] = field(default_factory=dict)
    strata: Dict[str, str] = field(default_factory=dict)  # node -> sedimentation stage
    touch_count: Dict[str, int] = field(default_factory=dict)
    pending_signals: List[PendingSignal] = field(default_factory=list)
    # Bounded event traces surfaced through snapshot_state().
    sedimentation_trace: List[dict] = field(default_factory=list)
    merge_events: List[dict] = field(default_factory=list)
    decay_events: List[dict] = field(default_factory=list)
    feedback_effect: dict = field(default_factory=dict)
    # Derived observability fields refreshed each step.
    active_region: List[str] = field(default_factory=list)
    output_mode: str = "minimal"
    last_output: str = ""
    step_index: int = 0
    bound_ability_core: str | None = None
    drift_score: float = 0.0
    anchor_pull: float = 0.0
    free_capacity: float = 1.0
+23,
-0
1@@ -0,0 +1,23 @@
2+# Branch A Plan
3+
4+## Positioning
5+
6+Branch A is the pure graph-native minimal runtime line:
7+
8+- minimal runtime centered on the graph itself
9+- canonical state centered on `(phi, mu, J)`
10+- few parameters and little magic
11+- strong observability from the first runnable version
12+- no `exact_text_map` as the inference engine
13+- no MoE disguised as multi-core cognition
14+
15+## Planned Tasks
16+
17+1. **Task 01: scaffold + observability**
18+ Build the repo-native docs, minimal runtime package, and smoke tests with locked interface coverage.
19+2. **Task 02: minimal dynamics + feedback loop + decay + degraded output**
20+ Tighten the step loop, make homing/decay more expressive, and harden degraded emission behavior.
21+3. **Task 03: sedimentation path + skill belt candidates + merge/decay events**
22+ Expand the memory-to-experience-to-skill-to-core path and make event traces more meaningful.
23+4. **Task 04: unified validation/reporting against locked spec**
24+ Produce the comparable validation and reporting layer required by the locked docs.
+11,
-0
1@@ -0,0 +1,11 @@
2+[build-system]
3+requires = ["setuptools>=68"]
4+build-backend = "setuptools.build_meta"
5+
6+[project]
7+name = "cie-unified"
8+version = "0.1.0"
9+description = "Branch A minimal graph-native CIE runtime scaffold"
10+readme = "README.md"
11+requires-python = ">=3.11"
12+dependencies = []
1@@ -0,0 +1,140 @@
2+# Task 01: Branch A Round 1 Scaffold
3+
4+## Title
5+
6+Task 01: scaffold + observability
7+
8+## Direct Prompt
9+
10+Establish the repo-native Branch A workflow, create the planning/task docs, implement the smallest runnable Python scaffold for Task 01, add minimal smoke tests, and record execution notes after validation.
11+
12+## Suggested Branch Name
13+
14+`branch-a/task01-round1-scaffold`
15+
16+## Goal
17+
18+Create a small, inspectable, graph-native runtime baseline for Branch A that satisfies the locked interface and observability requirements without trying to implement the full CIE theory in round 1.
19+
20+## Background
21+
22+This round must follow the locked conceptual and engineering constraints in:
23+
24+- `/Users/george/code/CIE-Unified/README.md`
25+- `/Users/george/code/CIE-Unified/LOCKED_IMPLEMENTATION_SPEC.md`
26+
27+The runtime must stay graph-native, keep `(phi, mu, J)` as the canonical state, include real output-to-input feedback, and include real decay/forgetting behavior.
28+
29+## Involved Repo
30+
31+- `/Users/george/code/CIE-Unified`
32+
33+## Scope
34+
35+- create Branch A repo-native planning/task docs
36+- add minimal packaging and ignore rules
37+- implement a stdlib-only runtime scaffold
38+- expose the locked runtime interface
39+- add smoke tests with `unittest`
40+- record validation notes after execution
41+
42+## Allowed Modifications
43+
44+- `/Users/george/code/CIE-Unified/plans/2026-03-31_branch_a_plan.md`
45+- `/Users/george/code/CIE-Unified/tasks/2026-03-31_task01_branch_a_round1_scaffold.md`
46+- `/Users/george/code/CIE-Unified/pyproject.toml`
47+- `/Users/george/code/CIE-Unified/.gitignore`
48+- `/Users/george/code/CIE-Unified/cie/__init__.py`
49+- `/Users/george/code/CIE-Unified/cie/state.py`
50+- `/Users/george/code/CIE-Unified/cie/graph.py`
51+- `/Users/george/code/CIE-Unified/cie/runtime.py`
52+- `/Users/george/code/CIE-Unified/tests/__init__.py`
53+- `/Users/george/code/CIE-Unified/tests/test_smoke.py`
54+
55+## Avoid Modifying
56+
57+- `/Users/george/code/CIE-Unified/README.md`
58+- `/Users/george/code/CIE-Unified/LOCKED_IMPLEMENTATION_SPEC.md`
59+
60+## Must Complete
61+
62+- create the plan doc and task doc in-repo
63+- implement the minimal Branch A runtime scaffold
64+- add smoke tests for the locked interface and key round-1 behaviors
65+- run the recommended validation command
66+- update this document with execution notes and results
67+
68+## Acceptance Criteria
69+
70+1. A runtime package exists.
71+2. The canonical runtime state is centered on `(phi, mu, J)`.
72+3. `CIERuntime` exposes:
73+ - `ingest(input, context=None, anchors=None)`
74+ - `step(n=1)`
75+ - `emit()`
76+ - `commit_feedback(feedback)`
77+ - `snapshot_state()`
78+ - `reset_session()`
79+4. `snapshot_state()` returns all locked required keys:
80+ - `phi_summary`
81+ - `mu_summary`
82+ - `J_summary`
83+ - `active_region`
84+ - `bound_ability_core`
85+ - `anchor_pull`
86+ - `drift_score`
87+ - `free_capacity`
88+ - `experience_regions`
89+ - `skill_belt_candidates`
90+ - `sedimentation_trace`
91+ - `merge_events`
92+ - `decay_events`
93+ - `output_mode`
94+ - `feedback_effect`
95+5. `ingest()` + `step()` causes observable state change.
96+6. `emit()` participates in a real output-to-input feedback placeholder.
97+7. Real decay/forgetting behavior exists.
98+8. Smoke tests pass.
99+
100+## Evaluation Requirements
101+
102+- use only the Python standard library in the runtime
103+- keep the implementation small and inspectable
104+- avoid fake ontology via latent vectors
105+- avoid `exact_text_map`
106+- avoid MoE-style hidden substitution
107+- prefer observable state summaries over opaque internals
108+
109+## Recommended Validation Command
110+
111+`python3 -m unittest discover -s tests -v`
112+
113+## Delivery Requirements
114+
115+- keep Branch A aligned with the locked docs
116+- commit on `branch-a/task01-round1-scaffold`
117+- push the branch to `origin`
118+- include an execution record with branch/base/backup/validation/results/limitations
119+
120+## Execution Record
121+
122+- actual branch name: `branch-a/task01-round1-scaffold`
123+- base commit: `4c794d418f4c3332006035490bcce1370bb7f0ab`
124+- backup path used for dirty-worktree handling: `/Users/george/code/_codex_backups/CIE-Unified/20260331-161757`
125+- files changed:
126+ - `/Users/george/code/CIE-Unified/.gitignore`
127+ - `/Users/george/code/CIE-Unified/pyproject.toml`
128+ - `/Users/george/code/CIE-Unified/plans/2026-03-31_branch_a_plan.md`
129+ - `/Users/george/code/CIE-Unified/tasks/2026-03-31_task01_branch_a_round1_scaffold.md`
130+ - `/Users/george/code/CIE-Unified/cie/__init__.py`
131+ - `/Users/george/code/CIE-Unified/cie/graph.py`
132+ - `/Users/george/code/CIE-Unified/cie/state.py`
133+ - `/Users/george/code/CIE-Unified/cie/runtime.py`
134+ - `/Users/george/code/CIE-Unified/tests/__init__.py`
135+ - `/Users/george/code/CIE-Unified/tests/test_smoke.py`
136+- validation command run: `python3 -m unittest discover -s tests -v`
137+- concise test result summary: `Ran 6 smoke tests; all passed.`
138+- known limitations deferred to Task 02:
139+ - homing and decay are real but intentionally heuristic and lightweight
140+ - emit feedback is structural placeholder feedback, not yet richer semantic feedback
141+ - task switching, stronger degraded-output behavior, and more expressive dynamics remain for the next round
+1,
-0
1@@ -0,0 +1 @@
2+"""Test package for the minimal Branch A scaffold."""
+86,
-0
1@@ -0,0 +1,86 @@
2+from __future__ import annotations
3+
4+import unittest
5+
6+from cie import CIERuntime
7+
8+
class RuntimeSmokeTests(unittest.TestCase):
    """Smoke coverage for the locked Branch A runtime interface."""

    def test_runtime_initializes(self) -> None:
        snapshot = CIERuntime().snapshot_state()
        self.assertEqual(snapshot["output_mode"], "minimal")
        self.assertEqual(snapshot["active_region"], [])
        self.assertEqual(snapshot["phi_summary"]["node_count"], 0)

    def test_locked_interface_exists(self) -> None:
        runtime = CIERuntime()
        locked_methods = (
            "ingest",
            "step",
            "emit",
            "commit_feedback",
            "snapshot_state",
            "reset_session",
        )
        for method_name in locked_methods:
            with self.subTest(method=method_name):
                self.assertTrue(callable(getattr(runtime, method_name)))

    def test_ingest_and_step_change_state(self) -> None:
        runtime = CIERuntime()
        baseline = runtime.snapshot_state()
        runtime.ingest("graph native scaffold", context="task01", anchors="branch-a")
        updated = runtime.step()
        self.assertNotEqual(
            baseline["phi_summary"]["node_count"],
            updated["phi_summary"]["node_count"],
        )
        self.assertGreater(updated["mu_summary"]["total_activation"], 0.0)
        self.assertTrue(updated["active_region"])

    def test_snapshot_state_returns_required_keys(self) -> None:
        runtime = CIERuntime()
        runtime.ingest("observe state")
        snapshot = runtime.step()
        expected_keys = {
            "phi_summary",
            "mu_summary",
            "J_summary",
            "active_region",
            "bound_ability_core",
            "anchor_pull",
            "drift_score",
            "free_capacity",
            "experience_regions",
            "skill_belt_candidates",
            "sedimentation_trace",
            "merge_events",
            "decay_events",
            "output_mode",
            "feedback_effect",
        }
        self.assertEqual(set(snapshot), expected_keys)

    def test_emit_participates_in_feedback_state(self) -> None:
        runtime = CIERuntime()
        runtime.ingest("feedback loop", anchors="anchor")
        runtime.step()
        pre_emit = runtime.snapshot_state()
        output = runtime.emit()
        post_emit = runtime.snapshot_state()
        runtime.step()
        post_step = runtime.snapshot_state()
        self.assertTrue(output)
        self.assertEqual(post_emit["feedback_effect"]["source"], "emit")
        self.assertTrue(post_emit["feedback_effect"]["queued_tokens"])
        self.assertGreater(
            post_step["phi_summary"]["total_potential"],
            pre_emit["phi_summary"]["total_potential"],
        )

    def test_decay_events_can_occur(self) -> None:
        runtime = CIERuntime()
        runtime.ingest("decay path")
        for _ in range(5):
            runtime.step()
        snapshot = runtime.snapshot_state()
        self.assertTrue(snapshot["decay_events"])
        self.assertLess(snapshot["mu_summary"]["total_activation"], 2.0)


if __name__ == "__main__":
    unittest.main()